# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the IPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad based on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/sigStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius [km]
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
will be searched for attributes which contain the string specified in the dataSet keyword. If more than one are
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
----------
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
Written by Nathaniel A. Frissell, Fall 2013
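Example
-------
A minimal usage sketch; dataObj is assumed to be an existing musicArray object
that contains a data set whose name includes the string 'limitsApplied'.
#dataObj is assumed to already exist (e.g., created by musicArray(myPtr)).
currentData = getDataSet(dataObj,'limitsApplied')
print currentData.metadata['dataSetName']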
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
from strongest signal to weakest, as determined by the MUSIC algorithm.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
string_info = []
if sort_key is not None:
orders = [x[sort_key] for x in signal_list]
orders.sort()
for order in orders:
for sig in signal_list:
if sig[sort_key] == order:
string_info.append(stringify_signal(sig))
signal_list.remove(sig)
else:
for sig in signal_list:
string_info.append(stringify_signal(sig))
return string_info
class SigDetect(object):
"""Class to hold information about detected signals.
Methods
-------
string
reorder
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self):
pass
def string(self):
"""Method to convert a list of signal dictionaries into strings.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
return stringify_signal_list(self.info)
def reorder(self):
"""Method to sort items in .info by signal maximum value (from the scaled kArr) and update nrSignals.
Written by Nathaniel A. Frissell, Fall 2013
"""
#Do the sorting...
from operator import itemgetter
newlist = sorted(self.info,key=itemgetter('max'),reverse=True)
#Put in the order numbers...
order = 1
for item in newlist:
item['order'] = order
order = order + 1
#Save the list to the dataObj...
self.info = newlist
#Update the nrSigs
self.nrSigs = len(newlist)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplotlib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplotlib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
Written by Nathaniel A. Frissell, Fall 2013
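Example
-------
A usage sketch; ds is assumed to be an existing musicDataObj attached to a
musicArray (for example, dataObj.active).
#ds is assumed to already exist.
newSigObj = ds.copy('customStep','Description of the custom processing step')
newSigObj.setActive()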
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
Written by Nathaniel A. Frissell, Fall 2013
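Example
-------
A worked example (values illustrative only): data sampled every 120 seconds
has a Nyquist frequency of 1./(2*120) ~= 0.00417 Hz (about 4.2 mHz).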
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
Sample period of the signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
import ipdb; ipdb.set_trace()
return samplePeriod
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
rangeLimits : Optional[iterable]
Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minimum gates to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in its parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
Metadata affects various plotting parameters and signal processing routines.
Parameters
----------
**metadata :
keywords sent to matplotlib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
Information to add to history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
GS : Ground Scatter Mapping Model. See Bristow et al. [1994] (default)
IS : Standard SuperDARN scatter mapping model.
S : Standard projection model
E1 : for Chisham E-region 1/2-hop ionospheric projection model
F1 : for Chisham F-region 1/2-hop ionospheric projection model
F3 : for Chisham F-region 1 1/2-hop ionospheric projection model
C : Chisham projection model
None : if you trust your elevation or altitude values
fovCoords : Optional[str]
Map coordinate system. WARNING: 'geo' is currently the only tested coordinate system.
full_array : Optional[bool]
If True, make the data array the full beam, gate dimensions listed in the hdw.dat file.
If False, truncate the array to the maximum dimensions for which there is actually data.
False saves space without discarding any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
prm :
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = int(np.max(dataListArray[:,scanInx]) + 1)
if full_array:
nrBeams = int(fov.beams.max() + 1)
nrGates = int(fov.gates.max() + 1)
else:
nrBeams = int(np.max(dataListArray[:,beamInx]) + 1)
nrGates = int(np.max(dataListArray[:,gateInx]) + 1)
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[int(dataListArray[inx,scanInx]),int(dataListArray[inx,beamInx]),int(dataListArray[inx,gateInx])] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
"""Return a sorted list of musicDataObj's contained in this musicArray.
Returns
-------
dataSets : list of str
Names of musicDataObj's contained in this musicArray.
Written by Nathaniel A. Frissell, Fall 2013
"""
attrs = dir(self)
dataSets = []
for item in attrs:
if item.startswith('DS'):
dataSets.append(item)
dataSets.sort()
return dataSets
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
"""Interpolates the data in a musicArray object along the beams of the radar. This method will ensure that no
range gates are missing data. Ranges outside of metadata['gateLimits'] will be set to 0.
The result is stored as a new musicDataObj in the given musicArray object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Written by Nathaniel A. Frissell, Fall 2013
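Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object.
#dataObj is assumed to already exist.
beamInterpolation(dataObj,dataSet='originalFit')
#The interpolated data set is stored in dataObj and made active.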
"""
from scipy.interpolate import interp1d
currentData = getDataSet(dataObj,dataSet)
nrTimes = len(currentData.time)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for tt in range(nrTimes):
for bb in range(nrBeams):
rangeVec = currentData.fov.slantRCenter[bb,:]
input_x = copy.copy(rangeVec)
input_y = currentData.data[tt,bb,:]
#If metadata['gateLimits'], select only those measurements...
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]
if len(gateInx) < 2: continue
input_x = input_x[gateInx]
input_y = input_y[gateInx]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
interpArr[tt,bb,:] = intFn(rangeVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = interpArr
newDataSet.setActive()
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
"""Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
it does not create a new data set or alter the data in any way. If you specify rangeLimits, they will be changed to correspond
with the center value of the range cell. Gate limits always override range limits.
Use the applyLimits() method to remove data outside of the data limits.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minimum gates to use.
beamLimits : Optional[iterable]
Two-element array defining the maximum and minimum beams to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
Written by Nathaniel A. Frissell, Fall 2013
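Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object and the
limit values are illustrative only.
#dataObj is assumed to already exist.
defineLimits(dataObj,gateLimits=[10,40])
applyLimits(dataObj,newDataSetName='limitsApplied')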
"""
currentData = getDataSet(dataObj,dataSet)
try:
if (rangeLimits != None) or (gateLimits != None):
if (rangeLimits != None) and (gateLimits == None):
inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]
if gateLimits != None:
rangeMin = np.int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
rangeMax = np.int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
rangeLimits = [rangeMin,rangeMax]
currentData.metadata['gateLimits'] = gateLimits
currentData.metadata['rangeLimits'] = rangeLimits
if beamLimits != None:
currentData.metadata['beamLimits'] = beamLimits
if timeLimits != None:
currentData.metadata['timeLimits'] = timeLimits
except:
logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
"""Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
for a specified length of time.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
max_off_time : Optional[int/float]
Maximum length in minutes radar may remain off.
sTime : Optional[datetime.datetime]
Starting time of checking period. If None, min(currentData.time) is used.
eTime : Optional[datetime.datetime]
End time of checking period. If None, max(currentData.time) is used.
Written by Nathaniel A. Frissell, Fall 2013
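Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object.
#dataObj is assumed to already exist.
checkDataQuality(dataObj,max_off_time=10)
print dataObj.active.metadata['good_period']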
"""
currentData = getDataSet(dataObj,dataSet)
if sTime is None:
sTime = np.min(currentData.time)
if eTime is None:
eTime = np.max(currentData.time)
time_vec = currentData.time[np.logical_and(currentData.time > sTime, currentData.time < eTime)]
time_vec = np.concatenate(([sTime],time_vec,[eTime]))
max_diff = np.max(np.diff(time_vec))
if max_diff > datetime.timedelta(minutes=max_off_time):
currentData.setMetadata(good_period=False)
else:
currentData.setMetadata(good_period=True)
return dataObj
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
"""Removes data outside of the rangeLimits and gateLimits boundaries.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minimum gates to use.
beamLimits : Optional[iterable]
Two-element array defining the maximum and minimum beams to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Returns
-------
newData : musicDataObj
Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).
Written by Nathaniel A. Frissell, Fall 2013
"""
if (rangeLimits != None) or (gateLimits != None) or (timeLimits != None):
defineLimits(dataObj,dataSet='active',rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)
currentData = getDataSet(dataObj,dataSet)
try:
#Make a copy of the current data set.
commentList = []
if (currentData.metadata.has_key('timeLimits') == False and
currentData.metadata.has_key('beamLimits') == False and
currentData.metadata.has_key('gateLimits') == False):
return currentData
newData = currentData.copy(newDataSetName,comment)
#Apply the gateLimits
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]
newData.data = newData.data[:,:,gateInx]
newData.fov.gates = newData.fov.gates[gateInx]
newData.fov.latCenter = newData.fov.latCenter[:,gateInx]
newData.fov.lonCenter = newData.fov.lonCenter[:,gateInx]
newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[:,gateInxFull]
newData.fov.lonFull = newData.fov.lonFull[:,gateInxFull]
newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]
commentList.append('gate: %i,%i' % tuple(limits))
rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
commentList.append('range [km]: %i,%i' % rangeLim)
#Remove limiting item from metadata.
newData.metadata.pop('gateLimits')
if newData.metadata.has_key('rangeLimits'): newData.metadata.pop('rangeLimits')
#Apply the beamLimits.
if currentData.metadata.has_key('beamLimits'):
limits = currentData.metadata['beamLimits']
beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]
newData.data = newData.data[:,beamInx,:]
newData.fov.beams = newData.fov.beams[beamInx]
newData.fov.latCenter = newData.fov.latCenter[beamInx,:]
newData.fov.lonCenter = newData.fov.lonCenter[beamInx,:]
newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[beamInxFull,:]
newData.fov.lonFull = newData.fov.lonFull[beamInxFull,:]
newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]
commentList.append('beam: %i,%i' % tuple(limits))
#Remove limiting item from metadata.
newData.metadata.pop('beamLimits')
#Apply the time limits.
if currentData.metadata.has_key('timeLimits'):
limits = currentData.metadata['timeLimits']
timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]
newData.data = newData.data[timeInx,:,:]
newData.time = newData.time[timeInx]
commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
#Remove limiting item from metadata.
newData.metadata.pop('timeLimits')
#Update the history with what limits were applied.
comment = 'Limits Applied'
commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
key = max(newData.history.keys())
newData.history[key] = commentStr
logging.debug(commentStr)
newData.setActive()
return newData
except:
if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
# print 'Warning! Limits not applied.'
return currentData
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
"""Finds the center cell of the field-of-view of a musicArray data object.
The range, azimuth, x-range, and y-range from the center to each cell in the FOV
is calculated and saved to the FOV object. The following objects are added to
dataObj.dataSet:
fov.relative_centerInx: [beam, gate] index of the center cell
fov.relative_azm: Azimuth relative to center cell [deg]
fov.relative_range: Range relative to center cell [km]
fov.relative_x: X-range relative to center cell [km]
fov.relative_y: Y-range relative to center cell [km]
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
altitude : Optional[float]
altitude added to the Earth radius Re = 6378 km [km]
Returns
-------
None
Written by Nathaniel A. Frissell, Fall 2013
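Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object.
#dataObj is assumed to already exist.
determineRelativePosition(dataObj,altitude=250.)
print dataObj.active.fov.relative_centerInx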
"""
from davitpy import utils
#Get the chosen dataset.
currentData = getDataSet(dataObj,dataSet)
#Determine center beam.
ctrBeamInx = len(currentData.fov.beams)/2
ctrGateInx = len(currentData.fov.gates)/2
currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]
#Set arrays of lat1/lon1 to the center cell value. Use this to calculate all other positions
#with numpy array math.
lat1 = np.zeros_like(currentData.fov.latCenter)
lon1 = np.zeros_like(currentData.fov.latCenter)
lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
#Make lat2/lon2 the center position array of the dataset.
lat2 = currentData.fov.latCenter
lon2 = currentData.fov.lonCenter
#Calculate the azimuth and distance from the centerpoint to the endpoint.
azm = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)
#Save calculated values to the current data object, as well as calculate the
#X and Y relative positions of each cell.
currentData.fov.relative_azm = azm
currentData.fov.relative_range = dist
currentData.fov.relative_x = dist * np.sin(np.radians(azm))
currentData.fov.relative_y = dist * np.cos(np.radians(azm))
return None
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
"""Interpolates the data in a musicArray object to a regular time grid.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
timeRes : Optional[float]
time resolution of new time vector [seconds]
newTimeVec : Optional[list of datetime.datetime]
Sequence of datetime.datetime objects that data will be interpolated to. This overrides timeRes.
Written by Nathaniel A. Frissell, Fall 2013
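Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object and the
120 s resolution is illustrative only.
#dataObj is assumed to already exist.
timeInterpolation(dataObj,timeRes=120)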
"""
from scipy.interpolate import interp1d
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
sTime = currentData.time[0]
sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
fTime = currentData.time[-1]
#Create new time vector.
if newTimeVec == None:
newTimeVec = [sTime]
while newTimeVec[-1] < fTime:
newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))
#Ensure that the new time vector is within the bounds of the actual data set.
newTimeVec = np.array(newTimeVec)
good = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
newTimeVec = newTimeVec[good]
newEpochVec = utils.datetimeToEpoch(newTimeVec)
#Initialize interpolated data.
nrTimes = len(newTimeVec)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for rg in range(nrGates):
for bb in range(nrBeams):
input_x = currentData.time[:]
input_y = currentData.data[:,bb,rg]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
input_x = utils.datetimeToEpoch(input_x)
intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
interpArr[:,bb,rg] = intFn(newEpochVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.time = newTimeVec
newDataSet.data = interpArr
newDataSet.setActive()
def filterTimes(sTime,eTime,timeRes,numTaps):
"""The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
This function will calcuate the full time period of data that needs to be loaded in order to provide filtered data
for the event requested.
Parameters
----------
sTime : datetime.datetime
Start time of event.
eTime : datetime.datetime
End time of event.
timeRes : float
Time resolution in seconds of data to be sent to filter.
numTaps : int
Length of the filter
Returns
-------
newSTime, newETime : datetime.datetime, datetime.datetime
Start and end times of data that needs to be fed into the filter.
Written by Nathaniel A. Frissell, Fall 2013
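Example
-------
A worked sketch (parameter values are illustrative only): with timeRes=120 s
and numTaps=101, each end of the requested period is padded by
101*120/2. = 6060 seconds (101 minutes).
import datetime
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
loadSTime,loadETime = filterTimes(sTime,eTime,timeRes=120,numTaps=101)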
"""
td = datetime.timedelta(seconds=(numTaps*timeRes/2.))
newSTime = sTime - td
newETime = eTime + td
return (newSTime, newETime)
class filter(object):
"""Filter a VT sig/sigStruct object and define a FIR filter object.
If only cutoff_low is defined, this is a high pass filter.
If only cutoff_high is defined, this is a low pass filter.
If both cutoff_low and cutoff_high are defined, this is a band pass filter.
Uses scipy.signal.firwin()
High pass and band pass filters inspired by Matti Pastell's page:
http://mpastell.com/2010/01/18/fir-with-scipy/
Metadata keys:
'filter_cutoff_low' --> cutoff_low
'filter_cutoff_high' --> cutoff_high
'filter_numtaps' --> numtaps
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
numtaps : Optional[int]
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
the metadata value will be used.
cutoff_low : Optional[float, 1D array_like or None]
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`. If None, a low-pass filter will not
be applied.
If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
the metadata value will be used.
cutoff_high : Optional[float, 1D array_like, or None]
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
the metadata value will be used.
width : Optional[float]
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : Optional[string or tuple of string and parameter values]
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : Optional[bool]
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : Optional[bool]
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise.
Attributes
----------
comment : str
cutoff_low : float, 1D array_like or None
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges).
cutoff_high : float, 1D array_like, or None
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
nyq : float
the Nyquist rate
ir :
Methods
-------
plotTransferFunction
plotImpulseResponse
filter
Written by Nathaniel A. Frissell, Fall 2013
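Example
-------
A band-pass usage sketch; dataObj is assumed to be an existing musicArray with
a regularly sampled (time-interpolated) active data set, and the cutoff values
are illustrative only.
#dataObj is assumed to already exist.
filt = filter(dataObj,numtaps=101,cutoff_low=0.0003,cutoff_high=0.0012)
filt.plotTransferFunction()
#Constructing the filter also applies it; the result is stored as a new
#'filtered' data set and made active.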
"""
def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
import scipy as sp
sigObj = getattr(dataObj,dataSet)
nyq = sigObj.nyquistFrequency()
#Get metadata for cutoffs and numtaps.
md = sigObj.metadata
if cutoff_high == None:
if md.has_key('filter_cutoff_high'):
cutoff_high = md['filter_cutoff_high']
if cutoff_low == None:
if md.has_key('filter_cutoff_low'):
cutoff_low = md['filter_cutoff_low']
if numtaps == None:
if md.has_key('filter_numtaps'):
numtaps = md['filter_numtaps']
else:
logging.warning('You must provide numtaps.')
return
if cutoff_high != None: #Low pass
lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
d = lp
if cutoff_low != None: #High pass
hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
hp[numtaps/2] = hp[numtaps/2] + 1
d = hp
if cutoff_high != None and cutoff_low != None:
d = -(lp+hp)
d[numtaps/2] = d[numtaps/2] + 1
d = -1.*d #Needed to correct 180 deg phase shift.
if cutoff_high == None and cutoff_low == None:
logging.warning("You must define cutoff frequencies!")
return
self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cutoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
self.cutoff_low = cutoff_low
self.cutoff_high = cutoff_high
self.nyq = nyq
self.ir = d
self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)
def __str__(self):
return self.comment
def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_mag : Optional[float]
Minimum value for y-axis for the frequency response plot.
ymax_mag : Optional[float]
Maximum value for y-axis for the frequency response plot.
ymin_phase : Optional[float]
Minimum value for y-axis for the phase response plot.
ymax_phase : Optional[float]
Maximum value for y-axis for the phase response plot.
worN : Optional[int]
passed to scipy.signal.freqz()
If None, then compute at 512 frequencies around the unit circle.
If the len(filter) > 512, then compute at len(filter) frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
if worN == None:
if len(self.ir) > 512: worN = len(self.ir)
else: worN = None
else: pass
w,h = sp.signal.freqz(self.ir,1,worN=worN)
h_dB = 20 * np.log10(abs(h))
axis = fig.add_subplot(211)
#Compute frequency vector.
w = w/max(w) * self.nyq
axis.plot(w,h_dB,'.-')
#mp.axvline(x=self.fMax,color='r',ls='--',lw=2)
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)
axis.set_xlabel(r'Frequency (Hz)')
axis.set_ylabel('Magnitude (db)')
axis.set_title(r'Frequency response')
axis = fig.add_subplot(212)
h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
axis.plot(w,h_Phase,'.-')
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)
axis.set_ylabel('Phase (radians)')
axis.set_xlabel(r'Frequency (Hz)')
axis.set_title(r'Phase response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
"""Plot the impulse and step response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_imp : Optional[float]
Minimum value for y-axis for the impulse response plot.
ymax_imp : Optional[float]
Maximum value for y-axis for the impulse response plot.
ymin_step : Optional[float]
Minimum value for y-axis for the step response plot.
ymax_step : Optional[float]
Maximum value for y-axis for the step response plot.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
l = len(self.ir)
impulse = np.repeat(0.,l); impulse[0] =1.
x = np.arange(0,l)
response = sp.signal.lfilter(self.ir,1,impulse)
axis = fig.add_subplot(211)
axis.stem(x, response)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Impulse response')
axis = fig.add_subplot(212)
step = np.cumsum(response)
axis.stem(x, step)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Step response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
"""Apply the filter to a vtsig object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
sigobj = getattr(dataObj,dataSet)
vtsig = sigobj.parent
nrTimes,nrBeams,nrGates = np.shape(sigobj.data)
#The filter delays the signal and cannot filter the tail end of the record. Shift the signal and keep track of where it is valid.
shift = np.int32(-np.floor(len(self.ir)/2.))
start_line = np.zeros(nrTimes)
start_line[0] = 1
start_line = np.roll(start_line,shift)
tinx0 = abs(shift)
tinx1 = np.where(start_line == 1)[0][0]
val_tm0 = sigobj.time[tinx0]
val_tm1 = sigobj.time[tinx1]
filteredData = np.zeros_like(sigobj.data)
#Apply filter
for bm in range(nrBeams):
for rg in range(nrGates):
tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
tmp = np.roll(tmp,shift)
filteredData[:,bm,rg] = tmp[:]
#Create new signal object.
newsigobj = sigobj.copy(newDataSetName,self.comment)
#Put in the filtered data.
newsigobj.data = copy.copy(filteredData)
newsigobj.time = copy.copy(sigobj.time)
#Clear out ymin and ymax from metadata; make sure the metadata block exists.
#If not, create it.
if hasattr(newsigobj,'metadata'):
delMeta = ['ymin','ymax','ylim']
for key in delMeta:
if newsigobj.metadata.has_key(key):
del newsigobj.metadata[key]
else:
newsigobj.metadata = {}
newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)
key = 'title'
if newsigobj.metadata.has_key(key):
newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
else:
newsigobj.metadata[key] = 'Filtered'
newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
newsigobj.setActive()
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
"""Linearly detrend a data in a musicArray/musicDataObj object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
type : Optional[str]
The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
is subtracted from data. If type == 'constant', only the mean of data is subtracted.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
try:
newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
except:
newDataArr[:,bm,rg] = np.nan
if comment == None:
comment = type.capitalize() + ' detrend (scipy.signal.detrend)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
"""Convert all NANs and INFs to finite numbers using numpy.nan_to_num().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
if comment == None:
comment = 'numpy.nan_to_num'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = np.nan_to_num(currentData.data)
newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
"""Apply a window to a musicArray object. The window is calculated using scipy.signal.get_window().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
window : Optional[str]
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
Written by Nathaniel A. Frissell, Fall 2013
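Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object whose
active data set has already been detrended.
#dataObj is assumed to already exist.
windowData(dataObj,window='hann')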
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
win = sp.signal.get_window(window,nrTimes,fftbins=False)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = currentData.data[:,bm,rg] * win
if comment == None:
comment = window.capitalize() + ' window applied (scipy.signal.get_window)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def calculateFFT(dataObj,dataSet='active',comment=None):
"""Calculate the spectrum of an object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
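Example
-------
A usage sketch; dataObj is assumed to be an existing musicArray object whose
active data set has been detrended and windowed.
#dataObj is assumed to already exist.
calculateFFT(dataObj)
print dataObj.active.dominantFreq #Dominant frequency [Hz]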
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Determine frequency axis.
nyq = currentData.nyquistFrequency()
freq_ax = np.arange(nrTimes,dtype='f8')
freq_ax = (freq_ax / max(freq_ax)) - 0.5
freq_ax = freq_ax * 2. * nyq
#Use complex64, not complex128! If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])
currentData.freqVec = freq_ax
currentData.spectrum = newDataArr
# Calculate the dominant frequency #############################################
posFreqInx = np.where(currentData.freqVec >= 0)[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) #Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) #Use the magnitude of the positive frequency data.
#Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
currentData.appendHistory('Calculated FFT')
def calculateDlm(dataObj,dataSet='active',comment=None):
"""Calculate the cross-spectral matrix of a musicaArray object. FFT must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
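Notes
-----
Each element of the cross-spectral matrix is the sum over the positive
frequencies of one cell's spectrum multiplied by the complex conjugate of
another cell's spectrum:
Dlm[l,m] = sum over f > 0 of ( S_l(f) * conj(S_m(f)) )
where l and m index the (beam, gate) cells listed in llLookupTable.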
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
nCells = nrBeams * nrGates
currentData.llLookupTable = np.zeros([5,nCells])
currentData.Dlm = np.zeros([nCells,nCells],dtype=np.complex128)
#Only use positive frequencies...
posInx = np.where(currentData.freqVec > 0)[0]
#Explicitly write out gate/range indices...
llList = []
for gg in xrange(nrGates):
for bb in xrange(nrBeams):
llList.append((bb,gg))
for ll in range(nCells):
llAI = llList[ll]
ew_dist = currentData.fov.relative_x[llAI]
ns_dist = currentData.fov.relative_y[llAI]
currentData.llLookupTable[:,ll] = [ll, currentData.fov.beams[llAI[0]], currentData.fov.gates[llAI[1]],ns_dist,ew_dist]
spectL = currentData.spectrum[posInx,llAI[0],llAI[1]]
for mm in range(nCells):
mmAI = llList[mm]
spectM = currentData.spectrum[posInx,mmAI[0],mmAI[1]]
currentData.Dlm[ll,mm] = np.sum(spectL * np.conj(spectM))
currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
"""Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Cross-spectrum array Dlm must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
kxMax : Optional[float]
Maximum kx (East-West) wavenumber to calculate [rad/km]
kyMax : Optional[float]
Maximum ky (North-South) wavenumber to calculate [rad/km]
dkx : Optional[float]
kx resolution [rad/km]
dky : Optional[float]
ky resolution [rad/km]
threshold : Optional[float]
threshold of signals to detect as a fraction of the maximum eigenvalue
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Calculate eigenvalues, eigenvectors
eVals,eVecs = np.linalg.eig(np.transpose(currentData.Dlm)) #Use the dataSet selected above, not necessarily dataObj.active
nkx = np.ceil(2*kxMax/dkx)
if (nkx % 2) == 0: nkx = nkx+1
kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)
nky = np.ceil(2*kyMax/dky)
if (nky % 2) == 0: nky = nky+1
kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)
nkx = int(nkx)
nky = int(nky)
xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
ym = currentData.llLookupTable[3,:] #y is in the N-S direction.
maxEval = np.max(np.abs(eVals))
minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
cnt = np.size(minEvalsInx)
maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
nSigs = np.size(maxEvalsInx)
if cnt < 3:
logging.warning('Not enough small eigenvalues!')
import ipdb; ipdb.set_trace()
logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
logging.info('Kx Max: ' + str(kxMax))
logging.info('Kx Res: ' + str(dkx))
logging.info('Ky Max: ' + str(kyMax))
logging.info('Ky Res: ' + str(dky))
logging.info('')
logging.info('Signal Threshold: ' + str(threshold))
logging.info('Number of Det Signals: ' + str(nSigs))
logging.info('Number of Noise Evals: ' + str(cnt))
logging.info('Starting kArr Calculation...')
t0 = datetime.datetime.now()
def vCalc(um,v):
return np.dot( np.conj(um), v) * np.dot( np.conj(v), um)
vList = [eVecs[:,minEvalsInx[ee]] for ee in xrange(cnt)]
kArr = np.zeros((nkx,nky),dtype=np.complex64)
for kk_kx in xrange(nkx):
kx = kxVec[kk_kx]
for kk_ky in xrange(nky):
ky = kyVec[kk_ky]
um = np.exp(1j*(kx*xm + ky*ym))
kArr[kk_kx,kk_ky]= 1. / np.sum(map(lambda v: vCalc(um,v), vList))
t1 = datetime.datetime.now()
logging.info('Finished kArr Calculation. Total time: ' + str(t1-t0))
currentData.karr = kArr
currentData.kxVec = kxVec
currentData.kyVec = kyVec
currentData.appendHistory('Calculated kArr')
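# Hedged usage sketch (not part of the original module; `dataObj` is an
# illustrative, already-windowed musicArray object): the wavenumber analysis
# is normally run as a chain of the three routines above.
#
#     calculateFFT(dataObj)       # fills freqVec, spectrum, dominantFreq
#     calculateDlm(dataObj)       # fills Dlm and llLookupTable
#     calculateKarr(dataObj, kxMax=0.05, kyMax=0.05, dkx=0.001, dky=0.001)
#     kArr = dataObj.active.karr  # complex (nkx, nky) array ready for plotting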
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
"""Replace SuperDARN Data with simulated MSTID(s). This is useful for understanding how the signal processing
routines of this module affect ideal data.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
keepLocalRange : Optional[bool]
If true, the locations calculated for the actual radar field of view will be used. If false,
a linearly-spaced grid will replace the true grid.
sigs : Optional[list of tuples]
A list of tuples defining the characteristics of the simulated signal. Sample list is as follows.
If this keyword is None, the values in this sample list are used as the default values.::
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
Each signal is evaluated as a cosine and then summed together. The cosine evaluated is::
sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
noiseFactor : Optional[float]
Add white gaussian noise to the simulated signal. noiseFactor is a scalar such that:
noise = noiseFactor*np.random.standard_normal(nSteps)
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
#Typical TID Parameters:
# Frequency: 0.3 mHz (0.0003 Hz)
# Period: 55.5 min
# H. Wavelength: 314 km
# k: 0.02 /km
if keepLocalRange == True:
nx, ny = np.shape(currentData.fov.relative_x)
xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)
xgrid = currentData.fov.relative_x
ygrid = currentData.fov.relative_y
else:
nx = 16
xRange = 800.
ny = 25
yRange = 600.
xvec = np.linspace(-xRange/2.,xRange/2.,nx)
yvec = np.linspace(-yRange/2.,yRange/2.,ny)
dx = np.diff(xvec)[0]
dy = np.diff(yvec)[0]
xaxis = np.append(xvec,xvec[-1]+dx)
yaxis = np.append(yvec,yvec[-1]+dy)
xgrid = np.zeros((nx,ny))
ygrid = np.zeros((nx,ny))
for kk in xrange(nx): ygrid[kk,:] = yvec[:]
for kk in xrange(ny): xgrid[:,kk] = xvec[:]
if sigs == None:
#Set some default signals.
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
secVec = np.array(utils.datetimeToEpoch(currentData.time))
secVec = secVec - secVec[0]
nSteps = len(secVec)
dt = currentData.samplePeriod()
dataArr = np.zeros((nSteps,nx,ny))
for step in xrange(nSteps):
t = secVec[step]
for kk in xrange(len(sigs)):
amp = sigs[kk][0]
kx = sigs[kk][1]
ky = sigs[kk][2]
f = sigs[kk][3]
phi = sigs[kk][4]
dc = sigs[kk][5]
if 1./dt <= 2.*f:
logging.warning('Nyquist Violation in f.')
logging.warning('Signal #: %i' % kk)
# if 1./dx <= 2.*kx/(2.*np.pi):
# print 'WARNING: Nyquist Violation in kx.'
# print 'Signal #: %i' % kk
#
# if 1./dy <= 2.*ky/(2.*np.pi):
# print 'WARNING: Nyquist Violation in ky.'
# print 'Signal #: %i' % kk
temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
dataArr[step,:,:] = dataArr[step,:,:] + temp
#Signal RMS
sig_rms = np.zeros((nx,ny))
for xx in xrange(nx):
for yy in xrange(ny):
sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))
noise_rms = np.zeros((nx,ny))
if noiseFactor > 0:
nf = noiseFactor
#Temporal White Noise
for xx in xrange(nx):
for yy in xrange(ny):
noise = nf*np.random.standard_normal(nSteps)
noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise
xx = np.arange(ny)
mu = (ny-1.)/2.
sigma2 = 10.0
sigma = np.sqrt(sigma2)
rgDist = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
rgDist = rgDist / np.max(rgDist)
mask = np.zeros((nx,ny))
for nn in xrange(nx): mask[nn,:] = rgDist[:]
mask3d = np.zeros((nSteps,nx,ny))
for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]
#Apply Range Gate Dependence
dataArr = dataArr * mask3d
snr = (sig_rms/noise_rms)**2
snr_db = 10.*np.log10(snr)
if comment == None:
comment = 'Simulated data injected.'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = dataArr
newDataSet.setActive()
#OPENW,unit,'simstats.txt',/GET_LUN,WIDTH=300
#stats$ = ' Mean: ' + NUMSTR(MEAN(sig_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(sig_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(sig_rms)^2,3)
#PRINTF,unit,'SIG_RMS'
#PRINTF,unit,stats$
#PRINTF,unit,sig_rms
#
#PRINTF,unit,''
#PRINTF,unit,'NOISE_RMS'
#stats$ = ' Mean: ' + NUMSTR(MEAN(noise_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(noise_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(noise_rms)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,noise_rms
#
#PRINTF,unit,''
#PRINTF,unit,'SNR_DB'
#stats$ = ' Mean: ' + NUMSTR(MEAN(snr_db),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(snr_db),3) $
# + ' Var: ' + NUMSTR(STDDEV(snr_db)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,snr_db
#CLOSE,unit
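# Hedged usage sketch (illustrative values, not part of the original module):
# replace the measured data with a single synthetic MSTID plus noise, then run
# the same processing chain to check that the input (kx, ky) is recovered.
#
#     sigs = [(10., 0.02, 0.00, 0.0003, 0., 0.)]  # (amp, kx, ky, f, phi, dc)
#     simulator(dataObj, sigs=sigs, noiseFactor=1.0)
#     dataObj.simulated.data  # (nSteps, nx, ny) synthetic array, now 'active'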
def scale_karr(kArr):
"""Scale/normalize kArr for plotting and signal detection.
Parameters
----------
kArr : 2D numpy.array
Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Returns
-------
data : 2D numpy.array
Scaled and normalized version of kArr.
Written by Nathaniel A. Frissell, Fall 2013
"""
data = np.abs(kArr) - np.min(np.abs(kArr))
#Determine scale for colorbar.
scale = [0.,1.]
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
return data
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
"""Automatically detects local maxima/signals in a calculated kArr. This routine uses the watershed
algorithm from the skimage image processing library. Results are automatically stored in
dataObj.dataSet.sigDetect.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
threshold : Optional[float]
Scaled input data must be above this value to be detected. A higher number
will reduce the number of signals detected.
neighborhood : Optional[tuple]
Local region in which to search for peaks at every point in the image/array.
(10,10) will search a 10x10 pixel area.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
################################################################################
#Feature detection...
#Now lets do a little image processing...
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
#sudo pip install cython
#sudo pip install scikit-image
data = scale_karr(currentData.karr)
mask = data > threshold
labels, nb = ndimage.label(mask)
distance = ndimage.distance_transform_edt(mask)
local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
markers,nb = ndimage.label(local_maxi)
labels = watershed(-distance,markers,mask=mask)
areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
order = np.argsort(maxima)[::-1] + 1
maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))
sigDetect = SigDetect()
sigDetect.mask = mask
sigDetect.labels = labels
sigDetect.nrSigs = nb
sigDetect.info = []
for x in xrange(labels.max()):
info = {}
info['labelInx'] = x+1
info['order'] = order[x]
info['area'] = areas[x]
info['max'] = maxima[x]
info['maxpos'] = maxpos[x]
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = currentData.dominantFreq
info['period'] = 1./currentData.dominantFreq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
sigDetect.info.append(info)
currentData.appendHistory('Detected KArr Signals')
currentData.sigDetect = sigDetect
return currentData
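# Hedged usage sketch (illustrative): after calculateKarr(), run the detector
# and inspect the wave parameters of each detected signal.
#
#     detectSignals(dataObj, threshold=0.35, neighborhood=(10,10))
#     for sig in dataObj.active.sigDetect.info:
#         print sig['order'], sig['lambda'], sig['azm'], sig['vel']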
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
"""Manually add a signal to the detected signal list. All signals will be re-ordered according to value in the
scaled kArr. Added signals can be distinguished from autodetected signals because
'labelInx' and 'area' will both be set to -1.
Parameters
----------
kx : float
Value of kx of new signal.
ky : float
Value of ky of new signal.
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
frequency : Optional[float]
Frequency to use to calculate period, phase velocity, etc. If None,
the calculated dominant frequency will be used.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
def find_nearest_inx(array,value):
return (np.abs(array-value)).argmin()
kx_inx = find_nearest_inx(currentData.kxVec,kx)
ky_inx = find_nearest_inx(currentData.kyVec,ky)
maxpos = (kx_inx,ky_inx)
value = data[kx_inx,ky_inx]
true_value = currentData.karr[kx_inx,ky_inx] #Get the unscaled kArr value.
if frequency == None:
freq = currentData.dominantFreq
else:
freq = frequency
info = {}
info['labelInx'] = -1
info['area'] = -1
info['order'] = -1
info['max'] = value
info['true_max'] = true_value #Unscaled kArr value
info['maxpos'] = maxpos
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = freq
info['period'] = 1./freq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
currentData.sigDetect.info.append(info)
currentData.sigDetect.reorder()
currentData.appendHistory('Appended Signal to sigDetect List')
return currentData
def del_signal(order,dataObj,dataSet='active'):
"""Remove a signal to the detected signal list.
Parameters
----------
order :
Single value or list of signal orders (IDs) to be removed from the list.
dataObj : musicArray
object
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
orderArr = np.array(order)
for item in list(currentData.sigDetect.info):
if item['order'] in orderArr:
currentData.sigDetect.info.remove(item)
currentData.sigDetect.reorder()
currentData.appendHistory('Deleted Signals from sigDetect List')
return currentData
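# Hedged usage sketch (illustrative): manually append a signal at a chosen
# (kx, ky) and remove unwanted autodetected signals by their order IDs.
#
#     add_signal(0.02, -0.01, dataObj)   # marked with labelInx = area = -1
#     del_signal([2, 3], dataObj)        # drops the signals with order 2 and 3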
| gpl-3.0 |
acmaheri/sms-tools | lectures/8-Sound-transformations/plots-code/stftFiltering-orchestra.py | 3 | 1648 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band stop filter using a hanning window
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N/2)-60
filt[startBin:startBin+nBins] = bandpass
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, fs, w, N, H)
mY,pY = STFT.stftAnal(y, fs, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(N/2)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
JWarmenhoven/seaborn | seaborn/tests/test_linearmodels.py | 4 | 19116 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from nose import SkipTest
try:
import statsmodels.regression.linear_model as smlm
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
from . import PlotTestCase
from .. import linearmodels as lm
from .. import algorithms as algo
from .. import utils
from ..palettes import color_palette
rs = np.random.RandomState(0)
class TestLinearPlotter(PlotTestCase):
rs = np.random.RandomState(77)
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list("abcdefghij"), 6)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
df.loc[[10, 20, 30], 'y_na'] = np.nan
def test_establish_variables_from_frame(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y="y")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_series(self):
p = lm._LinearPlotter()
p.establish_variables(None, x=self.df.x, y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_array(self):
p = lm._LinearPlotter()
p.establish_variables(None,
x=self.df.x.values,
y=self.df.y.values)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_mix(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_bad(self):
p = lm._LinearPlotter()
with nt.assert_raises(ValueError):
p.establish_variables(None, x="x", y=self.df.y)
def test_dropna(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y_na="y_na")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y_na, self.df.y_na)
p.dropna("x", "y_na")
mask = self.df.y_na.notnull()
pdt.assert_series_equal(p.x, self.df.x[mask])
pdt.assert_series_equal(p.y_na, self.df.y_na[mask])
class TestRegressionPlotter(PlotTestCase):
rs = np.random.RandomState(49)
grid = np.linspace(-3, 3, 30)
n_boot = 100
bins_numeric = 3
bins_given = [-1, 0, 1]
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list(range(6)), 10)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
bw_err = rs.randn(6)[df.s.values] * 2
df.y += bw_err
p = 1 / (1 + np.exp(-(df.x * 2 + rs.randn(60))))
df["c"] = [rs.binomial(1, p_i) for p_i in p]
df.loc[[10, 20, 30], 'y_na'] = np.nan
def test_variables_from_frame(self):
p = lm._RegressionPlotter("x", "y", data=self.df, units="s")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_series_equal(p.units, self.df.s)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._RegressionPlotter(self.df.x, self.df.y, units=self.df.s)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.units, self.df.s)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._RegressionPlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_dropna(self):
p = lm._RegressionPlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._RegressionPlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_ci(self):
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 95)
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95, x_ci=68)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 68)
@skipif(_no_statsmodels)
def test_fast_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit with the "fast" function, which just does linear algebra
yhat_fast, _ = p.fit_fast(self.grid)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_fast, yhat_smod)
@skipif(_no_statsmodels)
def test_regress_poly(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit an first-order polynomial
yhat_poly, _ = p.fit_poly(self.grid, 1)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_poly, yhat_smod)
def test_regress_logx(self):
x = np.arange(1, 10)
y = np.arange(1, 10)
grid = np.linspace(1, 10, 100)
p = lm._RegressionPlotter(x, y, n_boot=self.n_boot)
yhat_lin, _ = p.fit_fast(grid)
yhat_log, _ = p.fit_logx(grid)
nt.assert_greater(yhat_lin[0], yhat_log[0])
nt.assert_greater(yhat_log[20], yhat_lin[20])
nt.assert_greater(yhat_lin[90], yhat_log[90])
@skipif(_no_statsmodels)
def test_regress_n_boot(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))
@skipif(_no_statsmodels)
def test_regress_without_bootstrap(self):
p = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot, ci=None)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
nt.assert_is(boots_fast, None)
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
nt.assert_is(boots_poly, None)
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
nt.assert_is(boots_smod, None)
def test_numeric_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_numeric)
npt.assert_equal(len(bins), self.bins_numeric)
npt.assert_array_equal(np.unique(x_binned), bins)
def test_provided_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
npt.assert_array_equal(np.unique(x_binned), self.bins_given)
def test_bin_results(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
nt.assert_greater(self.df.x[x_binned == 0].min(),
self.df.x[x_binned == -1].max())
nt.assert_greater(self.df.x[x_binned == 1].min(),
self.df.x[x_binned == 0].max())
def test_scatter_data(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, x_jitter=.1)
x, y = p.scatter_data
nt.assert_true((x != self.df.d).any())
npt.assert_array_less(np.abs(self.df.d - x), np.repeat(.1, len(x)))
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, y_jitter=.05)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_less(np.abs(self.df.y - y), np.repeat(.1, len(y)))
def test_estimate_data(self):
p = lm._RegressionPlotter(self.df.d, self.df.y, x_estimator=np.mean)
x, y, ci = p.estimate_data
npt.assert_array_equal(x, np.sort(np.unique(self.df.d)))
npt.assert_array_almost_equal(y, self.df.groupby("d").y.mean())
npt.assert_array_less(np.array(ci)[:, 0], y)
npt.assert_array_less(y, np.array(ci)[:, 1])
def test_estimate_cis(self):
# set known good seed to avoid the test stochastically failing
np.random.seed(123)
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=95)
_, _, ci_big = p.estimate_data
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=50)
_, _, ci_wee = p.estimate_data
npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=None)
_, _, ci_nil = p.estimate_data
npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
def test_estimate_units(self):
# Seed the RNG locally
np.random.seed(345)
p = lm._RegressionPlotter("x", "y", data=self.df,
units="s", x_bins=3)
_, _, ci_big = p.estimate_data
ci_big = np.diff(ci_big, axis=1)
p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
_, _, ci_wee = p.estimate_data
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_partial(self):
x = self.rs.randn(100)
y = x + self.rs.randn(100)
z = x + self.rs.randn(100)
p = lm._RegressionPlotter(y, z)
_, r_orig = np.corrcoef(p.x, p.y)[0]
p = lm._RegressionPlotter(y, z, y_partial=x)
_, r_semipartial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_semipartial, r_orig)
p = lm._RegressionPlotter(y, z, x_partial=x, y_partial=x)
_, r_partial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_partial, r_orig)
@skipif(_no_statsmodels)
def test_logistic_regression(self):
p = lm._RegressionPlotter("x", "c", data=self.df,
logistic=True, n_boot=self.n_boot)
_, yhat, _ = p.fit_regression(x_range=(-3, 3))
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)
@skipif(_no_statsmodels)
def test_robust_regression(self):
p_ols = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot)
_, ols_yhat, _ = p_ols.fit_regression(x_range=(-3, 3))
p_robust = lm._RegressionPlotter("x", "y", data=self.df,
robust=True, n_boot=self.n_boot)
_, robust_yhat, _ = p_robust.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(ols_yhat), len(robust_yhat))
@skipif(_no_statsmodels)
def test_lowess_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, lowess=True)
grid, yhat, err_bands = p.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(grid), len(yhat))
nt.assert_is(err_bands, None)
def test_regression_options(self):
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, order=2)
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, logistic=True)
def test_regression_limits(self):
f, ax = plt.subplots()
ax.scatter(self.df.x, self.df.y)
p = lm._RegressionPlotter("x", "y", data=self.df)
grid, _, _ = p.fit_regression(ax)
xlim = ax.get_xlim()
nt.assert_equal(grid.min(), xlim[0])
nt.assert_equal(grid.max(), xlim[1])
p = lm._RegressionPlotter("x", "y", data=self.df, truncate=True)
grid, _, _ = p.fit_regression()
nt.assert_equal(grid.min(), self.df.x.min())
nt.assert_equal(grid.max(), self.df.x.max())
class TestRegressionPlots(PlotTestCase):
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot("x", "y", self.df)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, scatter=False, ax=ax)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, fit_reg=False)
nt.assert_equal(len(ax.lines), 0)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, ci=None)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_is(ax.collections[0]._alpha, None)
nt.assert_equal(ax.collections[0]._facecolors[0, 3], 0.5)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color,
'alpha': 0.4})
nt.assert_equal(ax.collections[0]._alpha, 0.4)
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
def test_regplot_binned(self):
ax = lm.regplot("x", "y", self.df, x_bins=5)
nt.assert_equal(len(ax.lines), 6)
nt.assert_equal(len(ax.collections), 2)
def test_lmplot_basic(self):
g = lm.lmplot("x", "y", self.df)
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_lmplot_hue(self):
g = lm.lmplot("x", "y", data=self.df, hue="h")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 2)
nt.assert_equal(len(ax.collections), 4)
def test_lmplot_markers(self):
g1 = lm.lmplot("x", "y", data=self.df, hue="h", markers="s")
nt.assert_equal(g1.hue_kws, {"marker": ["s", "s"]})
g2 = lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s"])
nt.assert_equal(g2.hue_kws, {"marker": ["o", "s"]})
with nt.assert_raises(ValueError):
lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s", "d"])
def test_lmplot_marker_linewidths(self):
if mpl.__version__ == "1.4.2":
raise SkipTest
g = lm.lmplot("x", "y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
nt.assert_equal(c[0].get_linewidths()[0], 0)
rclw = mpl.rcParams["lines.linewidth"]
nt.assert_equal(c[1].get_linewidths()[0], rclw)
def test_lmplot_facets(self):
g = lm.lmplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (3, 2))
g = lm.lmplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.lmplot("x", "y", data=self.df, hue="h", col="u")
nt.assert_equal(g.axes.shape, (1, 6))
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot("x", "y", data=self.df, col="h", hue="h")
nt.assert_is(g._legend, None)
def test_lmplot_scatter_kws(self):
g = lm.lmplot("x", "y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x, y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
@skipif(_no_statsmodels)
def test_residplot_lowess(self):
ax = lm.residplot("x", "y", self.df, lowess=True)
nt.assert_equal(len(ax.lines), 2)
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
def test_three_point_colors(self):
x, y = np.random.randn(2, 3)
ax = lm.regplot(x, y, color=(1, 0, 0))
color = ax.collections[0].get_facecolors()
npt.assert_almost_equal(color[0, :3],
(1, 0, 0))
| bsd-3-clause |
AliShug/RoboVis | robovis/load_histogram.py | 1 | 3645 | import numpy as np
# from matplotlib import pyplot as plt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class RVLoadHistogram(QGraphicsView):
'''A histogram for the maximum load across the reachable area'''
def __init__(self, ik):
width = 330
height = 120
self.scene = QGraphicsScene(0,-15,width,height-15)
super(RVLoadHistogram, self).__init__(self.scene)
self.setBackgroundBrush(QBrush(Qt.white))
self.setRenderHints(QPainter.Antialiasing)
self.setFrameStyle(0)
self.setAlignment(Qt.AlignCenter)
self.setFixedSize(width, height)
self.setSceneRect(0, 0, width, height)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scale(1, -1)
self.subscribers = {
'mouseEnter' : [],
'mouseLeave' : [],
'mouseMove' : []
}
self.lines = []
self.hist = []
self.edges = []
self.config = ik.config
self.update(ik)
self.setMouseTracking(True)
def update(self, ik=None):
if ik is not None:
self.ik = ik
self.min_load = self.config['min_load'].value
for line in self.lines:
self.scene.removeItem(line)
self.lines = []
width = self.width()
height = self.height()
loads = np.ma.masked_invalid(self.ik.loads*self.ik.partial_ok)
loads = np.ma.masked_where(loads == 0, loads).compressed()
self.hist, self.edges = np.histogram(loads, bins='auto')
buckets = len(self.hist)
self.screen_step = width/np.max(self.edges)
max_count = np.max(self.hist)
# Display histogram
for i in range(buckets):
x = self.edges[i] * self.screen_step
w = max(1, (self.edges[i+1] - self.edges[i]) * self.screen_step)
l = (self.edges[i] + self.edges[i + 1]) / 2
count = self.hist[i]
if l < self.min_load:
color = QColor(100,100,100)
else:
color = QColor(200, 180, 100)
# print(count)
line = self.scene.addLine(x, 5, x, 5 + (height-5) * count/max_count, QPen(color, w))
self.lines.append(line)
# Setpoint shows the configuration's minimum load
setpoint = self.config['min_load'].value * self.screen_step
line = self.scene.addLine(setpoint, 0, setpoint, height, QPen(QColor(150, 150, 255), 2))
self.lines.append(line)
def setMinimumLoad(self, val):
self.min_load = val
self.update()
def subscribe(self, event, function):
self.subscribers[event].append(function)
def enterEvent(self, event):
for func in self.subscribers['mouseEnter']:
func(event)
def leaveEvent(self, event):
self.setMinimumLoad(self.config['min_load'].value)
for func in self.subscribers['mouseLeave']:
func(event)
def mouseMoveEvent(self, event):
if event.buttons() == Qt.LeftButton:
self.click(event.pos())
else:
pt = self.mapToScene(event.pos())
self.setMinimumLoad(pt.x()/self.screen_step)
for func in self.subscribers['mouseMove']:
func(event)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.click(event.pos())
def click(self, pos):
pt = self.mapToScene(pos)
self.config['min_load'].value = pt.x()/self.screen_step
self.config.notifyChange()
| mit |
cgrima/rsr | rsr/fit.py | 1 | 4401 | """
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
xtol : float
Relative tolerance on the fitted parameters passed to the least-squares minimizer.
ftol : float
Relative tolerance on the sum of squared residuals passed to the least-squares minimizer.
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[np.isfinite(sample)]
if len(sample) == 0:
bad = True
sample = np.zeros(10)+1
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
# plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, .5, 10, None)
prm0.add('pt', np.average(sample)**2,False, 0, 1, 'a**2+2*s**2*mu')
#if fit_model == 'hk':
# # From [Dutt and Greenleaf. 1994, eq.14]
# prm0.add('a4', np.average(sample)**4,False, 0, 1,
# '8*(1+1/mu)*s**4 + 8*s**2*s**2 + a**4')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
except:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
values = {}
# Create values dict For lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# Identify bad results
if bad is True:
result.success = False
result.values['a'] = 0
result.values['s'] = 0
result.values['mu'] = 0
result.values['pt'] = 0
result.chisqr = 0
result.redchi = 0
result.message = 'No valid data in the sample'
result.residual = 0
return result
| mit |
PedroMDuarte/thesis-hubbard-lda_evap | qmc.py | 1 | 16230 |
"""
This file provides a way to obtain thermodynamic quantities from an
interpolation of available QMC solutions
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font', **{'family':'serif'})
rc('text', usetex=True)
import glob
import os
import ldaconf
basedir = ldaconf.basedir
from scipy.spatial import Delaunay
from scipy.interpolate import CloughTocher2DInterpolator, LinearNDInterpolator
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
import logging
# create logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
#logger.disabled = True
def get_qty_mu( dat, mu, MUCOL, COL, **kwargs ):
# Control the interpolation between availble
# density points here
#~qtyinterp = 'nearest'
qtyinterp = 'linear'
msg = kwargs.get('msg', None)
DENSCOL = 1
ENTRCOL = 2
SPICOL = 3
CMPRCOL = 4
if COL == SPICOL:
default_minus = 1.0
default_plus = 0.0
elif COL == ENTRCOL:
default_minus = 0.0
default_plus = 0.0
elif COL == DENSCOL:
default_minus = 0.0
default_plus = 2.0
elif COL == CMPRCOL:
default_minus = 0.0
default_plus = 0.0
else:
raise ValueError("Column not defined: COL = {:d}".format(COL) )
CAREFUL = kwargs.get('careful', True)
if CAREFUL and (mu < -10. or mu > 60.):
CAREFUL = False
if qtyinterp == 'nearest':
index = np.argmin( np.abs(dat[:, MUCOL] - mu ))
qtyresult = dat[index,COL]
else:
# find the two closest chemical potentials that
# stride the point
mudat = dat[:,MUCOL]
verbose = False
if np.all(mu < mudat):
qtyresult = default_minus
if COL == DENSCOL or COL == ENTRCOL:
if verbose:
print "QTY=", COL,
print "===>>> mu={:0.2f} ".format(mu), msg
if dat[:,DENSCOL].min() < 0.1 :
qtyresult = default_minus
elif CAREFUL:
return 'out-of-bounds'
#print "====>>> BE CAREFUL : Using default density" + \
# " n=%.2f"%default_minus + \
# " at mu={:0.2f} ".format(mu),
#if msg is not None:
# print msg
#raise ValueError('density error')
elif np.all( mu > mudat):
qtyresult = default_plus
if COL == DENSCOL or COL == ENTRCOL:
if verbose:
print "QTY=", COL,
print "====>>> mu={:0.2f} ".format(mu), msg
if dat[:,DENSCOL].max() > 1.9 :
qtyresult = default_plus
elif CAREFUL:
return 'out-of-bounds'
#print "====>>> BE CAREFUL : Using default density" + \
# " n=%.2f"%default_plus + \
# " at mu={:0.2f} ".format(mu),
#if msg is not None:
# print msg
#raise ValueError('density error')
else:
# since the mu's are ordered we can do:
index0 = np.where( mudat <=mu )[0][-1]
index1 = np.where( mudat > mu )[0][0]
qty0 = dat[ index0, COL ]
qty1 = dat[ index1, COL ]
mu0 = dat[ index0, MUCOL ]
mu1 = dat[ index1, MUCOL ]
qtyresult = qty0 + (mu-mu0) * (qty1-qty0) / (mu1-mu0)
return qtyresult
#print
#print " mu = ", mu
#print "index0 = ", index0
#print "index1 = ", index1
#print "Doing linear interpolation for the qty"
#print " mu0 = ", mu0
#print " mu1 = ", mu1
#print "qty0 = ", qty0
#print "qty1 = ", qty1
#print "qtyresult = ", qtyresult
def find_closest_qmc( U=8, T=0.67, mu=4.0, **kwargs):
"""
This function finds the closest values of U and T in the QMC data
that straddle the values U and T given as arguments.
"""
nUs = 4
nTs = 3
ALLPTS = kwargs.get('ALLPTS', False)
# select which quantity will be returned, options are
# spi and entropy
QTY = kwargs.get('QTY', 'spi' )
if QTY == 'spi':
datadir = basedir + 'COMB_Final_Spi/'
elif QTY == 'entropy':
datadir = basedir + 'COMB_Final_Entr/'
elif QTY == 'density':
datadir = basedir + 'COMB_Final_Spi/'
elif QTY == 'kappa':
datadir = basedir + 'COMB_Final_Spi/'
else:
raise ValueError('Quantity not defined:' + str(QTY) )
fname = datadir + 'U*'
us = [ float(u.split('/U')[-1]) for u in glob.glob(fname) ]
du = [ np.abs(U-u) for u in us ]
index = np.argsort(du)
if ALLPTS:
Ulist0 = range(len(index))
else:
Ulist0 = range( nUs )
us = [ us[index[i]] for i in Ulist0]
#print us
#print du
#print index
#print "Closest Us = ", us
datfiles = []
for u in us:
# For the Spi and Stheta data
if QTY == 'spi' or QTY == 'density' or QTY == 'kappa':
fname = datadir + 'U{U:02d}/T*dat'.format(U=int(u))
fs = sorted(glob.glob(fname))
Ts = [ float(f.split('T')[1].split('.dat')[0]) for f in fs ]
elif QTY=='entropy':
fname = datadir + 'U{U:02d}/S*dat'.format(U=int(u))
fs = sorted(glob.glob(fname))
Ts = [ float(f.split('S')[1].split('.dat')[0]) for f in fs ]
Ts_g = [] ; Ts_l = [];
for t in Ts:
if t > T:
Ts_g.append(t)
else:
Ts_l.append(t)
order_g = np.argsort( [ np.abs( T -t ) for t in Ts_g ] )
order_l = np.argsort( [ np.abs( T -t ) for t in Ts_l ] )
try:
Tpts = [ Ts_g[ order_g[0]] , Ts_l[ order_l[0]] ]
except:
#print
#print "problem adding U=",u, "T=",Ts
#print "available T data does not stride the point"
#print "T =", T
#print "Ts =", Ts
#print "will add nearest Ts nevertheless"
Tpts = [ ]
#raise ValueError("QMC data not available.")
dT = [ np.abs( T - t) for t in Ts ]
index = np.argsort(dT)
if ALLPTS:
Tlist0 = range(len(Ts))
else:
Tlist0 = range( min(nTs , len(Ts)))
for i in Tlist0:
Tnew = Ts[index[i]]
if Tnew not in Tpts:
Tpts.append(Tnew)
for Tpt in Tpts:
index = Ts.index( Tpt )
try:
datfiles.append( [ fs[ index ], u, Ts[index] ] )
except:
print "problem adding U=",u, "T=",Ts
raise
# Need to make sure that selected T values stride both
# sides of the point
#print
#print u
#print Ts
#print dT
#print index
#print fs
# for i in range(min(3, len(Ts))):
# try:
# datfiles.append( [ fs[index[i]], u, Ts[index[i]] ] )
# except:
# print "problem adding U=",u, "T=",Ts
# raise
#
#datfiles.append( [ fs[index[1]], u, Ts[index[1]] ] )
#print datfiles
MUCOL = 0
DENSCOL = 1
ENTRCOL = 2
SPICOL = 3
CMPRCOL = 4
if QTY == 'spi':
COL = SPICOL
elif QTY == 'entropy':
COL = ENTRCOL
elif QTY == 'density':
COL = DENSCOL
elif QTY == 'kappa':
COL = CMPRCOL
msg0 = 'U={:0.2f}, T={:0.2f}'.format(U,T)
logger.debug("number of nearby points = " + str(len(datfiles)))
basedat = []
basedaterr = []
datserr = []
for mm, f in enumerate(datfiles):
# f[0] is the datafile name
# f[1] is U
# f[2] is T
radius = kwargs.get('radius', np.nan )
msg = 'U={:0.2f}, T={:0.2f}'.format(U,T) + \
' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
format(mu, radius, f[1], f[2])
try:
dat = np.loadtxt(f[0])
spival = get_qty_mu( dat, mu, MUCOL, COL, msg=msg )
# Toggle the false here to plot all of the out of bounds
if spival == 'out-of-bounds':
#spival_symmetry =
logger.info('qty is out of bounds')
basedaterr.append( [f[1], f[2], np.nan] )
datserr.append( dat )
if False:
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( dat[:,MUCOL], dat[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
transform=ax.transAxes, fontsize=6.)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%mm, dpi=200)
plt.close(fig)
else:
plt.show()
plt.close(fig)
continue
else:
basedat.append( [f[1], f[2], spival] )
except Exception as e :
print "Failed to get data from file = ", f
# toggle plotting, not implemented yet:
if True:
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( dat[:,MUCOL], dat[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
transform=ax.transAxes)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%mm, dpi=200)
else:
plt.show()
raise e
logger.debug("number of nearby valid points = " + str(len(basedat)))
error = False
points = None
# MAKE THE TRIANGULATION
basedat = np.array(basedat)
Us = np.unique(basedat[:,0] )
Ts = np.unique(basedat[:,1] )
validTriang = not ( len(Us) ==1 or len(Ts) == 1 )
#print "#Us={:d}, #Ts={:d}".format( len(Us), len(Ts) )
#print msg
if validTriang:
points = _ndim_coords_from_arrays(( basedat[:,0] , basedat[:,1]))
#print "Closest dat = ", basedat
#finterp = CloughTocher2DInterpolator(points, basedat[:,2])
finterp = LinearNDInterpolator( points, basedat[:,2] )
else:
logerr = 'not enough finterp points, QTY=%s'%QTY + '\n' + msg + '\n' \
+ "number of basedat pts = " + str(len(basedat))
print basedat
print "len Us = ", len(Us)
print "len Ts = ", len(Ts)
print "len 'out-of-bounds' = ", len( basedaterr )
if len( basedaterr ) > 0:
for bb, bdaterr in enumerate(basedaterr):
msgbb = 'U={:0.2f}, T={:0.2f}'.format(U,T) +\
' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
format(mu, radius, basedaterr[bb][0], basedaterr[bb][1] )
daterr = datserr[bb]
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( daterr[:,MUCOL], daterr[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msgbb, ha='center', va='bottom', \
transform=ax.transAxes, fontsize=6.)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%bb, dpi=200)
plt.close(fig)
else:
plt.show()
plt.close(fig)
logger.exception(logerr)
raise ValueError('finterp')
if points is None:
logger.warning( "points object is None" )
if error == False:
try:
result = finterp( U,T )
if np.isnan(result):
if U >= 30.0 and U <=32.5:
result = finterp( 29.99, T )
logger.warning(" qmc: U={:0.1f} replaced to U=29.99 ".\
format(U) )
if np.isnan(result):
raise Exception("\n!!!! qmc: Invalid result, QTY:%s!!!!\n"%QTY \
+ msg0)
except Exception as e:
if kwargs.get('error_nan', False):
return np.nan
else:
error = True
logger.exception("Invalid QTY result!")
if error == False:
if result >= 8. and QTY == 'spi' :
print " Obtained Spi > 8. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".\
format( U, T, mu ),
print " ==> Spi={:0.2f}".format(float(result))
error = True
elif result >=4. and QTY == 'entropy':
print " Obtained Ent > 4. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".\
format( U, T, mu ),
print " ==> Result={:0.2f}".format(float(result))
error = True
logger.debug("error status = " + str(error))
if error or kwargs.get('showinterp',False):
logger.debug("Inside error if statement...")
if kwargs.get('error_nan', False):
pass
#return np.nan
#print "Interp points:"
#print basedat
if len(basedat) == 0 and len(basedaterr) > 0 :
basedaterr = np.array(basedaterr)
Userr = np.unique(basedaterr[:,0] )
Tserr = np.unique(basedaterr[:,1] )
validTriangerr = not ( len(Userr) ==1 or len(Tserr) == 1 )
points = _ndim_coords_from_arrays(( basedaterr[:,0] , basedaterr[:,1]))
tri = Delaunay(points)
else:
tri = Delaunay(points)
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.triplot(points[:,0], points[:,1], tri.simplices.copy())
ax.plot(points[:,0], points[:,1], 'o')
ax.plot( U, T, 'o', ms=6., color='red')
xlim = ax.get_xlim()
dx = (xlim[1]-xlim[0])/10.
ax.set_xlim( xlim[0]-dx, xlim[1]+dx )
ylim = ax.get_ylim()
dy = (ylim[1]-ylim[0])/10.
ax.set_ylim( ylim[0]-dy, ylim[1]+dy )
ax.set_xlabel('$U/t$')
ax.set_ylabel('$T/t$',rotation=0,labelpad=8)
tt = kwargs.get('title_text','')
ax.set_title( tt + '$U/t={:.2f}$'.format(U) + \
',\ \ ' + '$T/t={:.2f}$'.format(T), \
ha='center', va='bottom', fontsize=10)
save_err = kwargs.get('save_err',None)
if save_err is not None:
print "Saving png."
fig.savefig( save_err, dpi=300)
if matplotlib.get_backend() == 'agg':
fig.savefig('err.png', dpi=200)
print "Saved error to err.png"
else:
plt.show()
if not kwargs.get('single', False):
raise ValueError("Could not interpolate using QMC data.")
if ALLPTS:
if 'savepath' in kwargs.keys():
fig.savefig( kwargs.get('savepath',None) , dpi=300)
if error:
raise
return result
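# Hedged usage sketch (illustrative): interpolate the spin structure factor and
# the entropy at U/t=8, T/t=0.67 and local chemical potential mu/t=4 from the
# tabulated QMC solutions.
#
#     spi  = find_closest_qmc(U=8., T=0.67, mu=4.0, QTY='spi')
#     entr = find_closest_qmc(U=8., T=0.67, mu=4.0, QTY='entropy')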
| mit |
phobson/statsmodels | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
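# Hedged usage sketch (illustrative): render a numpydoc-style docstring as
# Sphinx-ready reST for an arbitrary object.
#
#     import numpy as np
#     doc = get_doc_object(np.mean, config={'use_plots': False})
#     print str(doc)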
| bsd-3-clause |
buntyke/GPy | doc/sphinxext/ipython_directive.py | 12 | 27263 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: orignal author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
try:
import matplotlib
matplotlib.use('Agg')
except ImportError:
print "Couldn't find matplotlib"
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprising at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# that echoes output (e.g. a 'print' call). The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
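# Hedged illustration of block_parser on a tiny two-line session, using the
# default prompt regexes/templates registered in setup() below:
#
#   part = "In [1]: x = 1\nOut[1]: 1"
#   block_parser(part,
#                re.compile('In \[(\d+)\]:\s?(.*)\s*'),
#                re.compile('Out\[(\d+)\]:\s?(.*)\s*'),
#                'In [%d]:', 'Out[%d]:')
#   # -> [(INPUT, (None, 'x = 1', '')), (OUTPUT, '1')]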
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
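# Hedged concrete example (POSIX paths assumed): with the decorator
# '@savefig myplot.png width=4in', savefig_dir='/doc/_static' and
# source_dir='/doc', this returns
#   ('myplot.png', '.. image:: _static/myplot.png\n   :width: 4in')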
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
content is a list of strings; it is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
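# Hedged reST usage sketch for this directive (argument and options as declared
# in option_spec above; prompts follow the default ipython_promptin/promptout):
#
#   .. ipython::
#
#      In [1]: x = 2
#
#      In [2]: x**3
#      Out[2]: 8
#
#   .. ipython:: python
#      :suppress:
#
#      import numpy as np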
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| mit |
michalkurka/h2o-3 | h2o-py/h2o/estimators/svd.py | 2 | 17854 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OSingularValueDecompositionEstimator(H2OEstimator):
"""
Singular Value Decomposition
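A hedged usage sketch, mirroring the :examples: blocks on the properties below
(the public dataset URL is the one those examples use; an active h2o session
is assumed):
>>> from h2o.estimators import H2OSingularValueDecompositionEstimator
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4, transform="standardize")
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o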
"""
algo = "svd"
supervised_learning = False
def __init__(self,
model_id=None, # type: Optional[Union[None, str, H2OEstimator]]
training_frame=None, # type: Optional[Union[None, str, H2OFrame]]
validation_frame=None, # type: Optional[Union[None, str, H2OFrame]]
ignored_columns=None, # type: Optional[List[str]]
ignore_const_cols=True, # type: bool
score_each_iteration=False, # type: bool
transform="none", # type: Literal["none", "standardize", "normalize", "demean", "descale"]
svd_method="gram_s_v_d", # type: Literal["gram_s_v_d", "power", "randomized"]
nv=1, # type: int
max_iterations=1000, # type: int
seed=-1, # type: int
keep_u=True, # type: bool
u_name=None, # type: Optional[str]
use_all_factor_levels=True, # type: bool
max_runtime_secs=0.0, # type: float
export_checkpoints_dir=None, # type: Optional[str]
):
"""
:param model_id: Destination id for this model; auto-generated if not specified.
Defaults to ``None``.
:type model_id: Union[None, str, H2OEstimator], optional
:param training_frame: Id of the training data frame.
Defaults to ``None``.
:type training_frame: Union[None, str, H2OFrame], optional
:param validation_frame: Id of the validation data frame.
Defaults to ``None``.
:type validation_frame: Union[None, str, H2OFrame], optional
:param ignored_columns: Names of columns to ignore for training.
Defaults to ``None``.
:type ignored_columns: List[str], optional
:param ignore_const_cols: Ignore constant columns.
Defaults to ``True``.
:type ignore_const_cols: bool
:param score_each_iteration: Whether to score during each iteration of model training.
Defaults to ``False``.
:type score_each_iteration: bool
:param transform: Transformation of training data
Defaults to ``"none"``.
:type transform: Literal["none", "standardize", "normalize", "demean", "descale"]
:param svd_method: Method for computing SVD (Caution: Randomized is currently experimental and unstable)
Defaults to ``"gram_s_v_d"``.
:type svd_method: Literal["gram_s_v_d", "power", "randomized"]
:param nv: Number of right singular vectors
Defaults to ``1``.
:type nv: int
:param max_iterations: Maximum iterations
Defaults to ``1000``.
:type max_iterations: int
:param seed: RNG seed for k-means++ initialization
Defaults to ``-1``.
:type seed: int
:param keep_u: Save left singular vectors?
Defaults to ``True``.
:type keep_u: bool
:param u_name: Frame key to save left singular vectors
Defaults to ``None``.
:type u_name: str, optional
:param use_all_factor_levels: Whether first factor level is included in each categorical expansion
Defaults to ``True``.
:type use_all_factor_levels: bool
:param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
Defaults to ``0.0``.
:type max_runtime_secs: float
:param export_checkpoints_dir: Automatically export generated models to this directory.
Defaults to ``None``.
:type export_checkpoints_dir: str, optional
"""
super(H2OSingularValueDecompositionEstimator, self).__init__()
self._parms = {}
self._id = self._parms['model_id'] = model_id
self.training_frame = training_frame
self.validation_frame = validation_frame
self.ignored_columns = ignored_columns
self.ignore_const_cols = ignore_const_cols
self.score_each_iteration = score_each_iteration
self.transform = transform
self.svd_method = svd_method
self.nv = nv
self.max_iterations = max_iterations
self.seed = seed
self.keep_u = keep_u
self.u_name = u_name
self.use_all_factor_levels = use_all_factor_levels
self.max_runtime_secs = max_runtime_secs
self.export_checkpoints_dir = export_checkpoints_dir
self._parms["_rest_version"] = 99
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``Union[None, str, H2OFrame]``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator()
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame')
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``Union[None, str, H2OFrame]``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> train, valid = arrests.split_frame(ratios=[.8])
>>> fit_h2o = H2OSingularValueDecompositionEstimator()
>>> fit_h2o.train(x=list(range(4)),
... training_frame=train,
... validation_frame=valid)
>>> fit_h2o
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
self._parms["validation_frame"] = H2OFrame._validate(validation_frame, 'validation_frame')
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool``, defaults to ``True``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(ignore_const_cols=False,
... nv=4)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool``, defaults to ``False``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... score_each_iteration=True)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def transform(self):
"""
Transformation of training data
Type: ``Literal["none", "standardize", "normalize", "demean", "descale"]``, defaults to ``"none"``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("transform")
@transform.setter
def transform(self, transform):
assert_is_type(transform, None, Enum("none", "standardize", "normalize", "demean", "descale"))
self._parms["transform"] = transform
@property
def svd_method(self):
"""
Method for computing SVD (Caution: Randomized is currently experimental and unstable)
Type: ``Literal["gram_s_v_d", "power", "randomized"]``, defaults to ``"gram_s_v_d"``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(svd_method="power")
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("svd_method")
@svd_method.setter
def svd_method(self, svd_method):
assert_is_type(svd_method, None, Enum("gram_s_v_d", "power", "randomized"))
self._parms["svd_method"] = svd_method
@property
def nv(self):
"""
Number of right singular vectors
Type: ``int``, defaults to ``1``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("nv")
@nv.setter
def nv(self, nv):
assert_is_type(nv, None, int)
self._parms["nv"] = nv
@property
def max_iterations(self):
"""
Maximum iterations
Type: ``int``, defaults to ``1000``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("max_iterations")
@max_iterations.setter
def max_iterations(self, max_iterations):
assert_is_type(max_iterations, None, int)
self._parms["max_iterations"] = max_iterations
@property
def seed(self):
"""
RNG seed for k-means++ initialization
Type: ``int``, defaults to ``-1``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4, seed=-3)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def keep_u(self):
"""
Save left singular vectors?
Type: ``bool``, defaults to ``True``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(keep_u=False)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("keep_u")
@keep_u.setter
def keep_u(self, keep_u):
assert_is_type(keep_u, None, bool)
self._parms["keep_u"] = keep_u
@property
def u_name(self):
"""
Frame key to save left singular vectors
Type: ``str``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(u_name="fit_h2o")
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o.u_name
>>> fit_h2o
"""
return self._parms.get("u_name")
@u_name.setter
def u_name(self, u_name):
assert_is_type(u_name, None, str)
self._parms["u_name"] = u_name
@property
def use_all_factor_levels(self):
"""
Whether first factor level is included in each categorical expansion
Type: ``bool``, defaults to ``True``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(use_all_factor_levels=False)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("use_all_factor_levels")
@use_all_factor_levels.setter
def use_all_factor_levels(self, use_all_factor_levels):
assert_is_type(use_all_factor_levels, None, bool)
self._parms["use_all_factor_levels"] = use_all_factor_levels
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float``, defaults to ``0.0``.
:examples:
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_runtime_secs=25)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
:examples:
>>> import tempfile
>>> from os import listdir
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> checkpoints_dir = tempfile.mkdtemp()
>>> fit_h2o = H2OSingularValueDecompositionEstimator(export_checkpoints_dir=checkpoints_dir,
... seed=-5)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> len(listdir(checkpoints_dir))
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
def init_for_pipeline(self):
"""
Returns an H2OSVD object, which implements the fit and transform methods so it can be used properly in a sklearn.Pipeline.
All parameters defined in self._parms should be input parameters of the H2OSVD.__init__ method.
:returns: H2OSVD object
:examples:
>>> from h2o.transforms.preprocessing import H2OScaler
>>> from h2o.estimators import H2ORandomForestEstimator
>>> from h2o.estimators import H2OSingularValueDecompositionEstimator
>>> from sklearn.pipeline import Pipeline
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> pipe = Pipeline([("standardize", H2OScaler()),
... ("svd", H2OSingularValueDecompositionEstimator(nv=3).init_for_pipeline()),
... ("rf", H2ORandomForestEstimator(seed=42,ntrees=50))])
>>> pipe.fit(arrests[1:], arrests[0])
"""
import inspect
from h2o.transforms.decomposition import H2OSVD
# check which parameters can be passed to H2OSVD init
var_names = list(dict(inspect.getmembers(H2OSVD.__init__.__code__))['co_varnames'])
parameters = {k: v for k, v in self._parms.items() if k in var_names}
return H2OSVD(**parameters)
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/neighbors/__init__.py | 71 | 1025 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
| mit |
idlead/scikit-learn | examples/text/document_classification_20newsgroups.py | 27 | 10521 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
cokelaer/colormap | src/colormap/colors.py | 1 | 32584 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# This file is part of the colormap software
#
# Copyright (c) 2011-20134
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# Website: https://github.com/cokelaer/colormap
# Documentation: http://packages.python.org/colormap
#
##############################################################################
"""Utilities provided in this module can be found either in the
standard Python module called :mod:`colorsys` or in matplotlib.colors
(e.g., rgb2hex) or are original to this module (e.g., rgb2yuv)
"""
# matplotlib dependence is only inside Colormap class
import colorsys
from easydev.tools import check_param_in_list, swapdict, check_range
from colormap.xfree86 import XFree86_colors
__all__ = ["HEX", "Color", "hex2web", "web2hex", "hex2rgb", "hex2dec",
"rgb2hex", "rgb2hsv", "hsv2rgb", "rgb2hls", "hls2rgb","yuv2rgb", "rgb2yuv",
"to_intensity", "yuv2rgb_int", "rgb2yuv_int", "Colormap"
]
def hex2web(hexa):
"""Convert hexadecimal string (6 digits) into *web* version (3 digits)
.. doctest::
>>> from colormap.colors import hex2web
>>> hex2web("#FFAA11")
'#FA1'
.. seealso:: :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
hexa = HEX().get_standard_hex_color(hexa)
return "#" + hexa[1::2]
def web2hex(web):
"""Convert *web* hexadecimal string (3 digits) into 6 digits version
.. doctest::
>>> from colormap.colors import web2hex
>>> web2hex("#FA1")
'#FFAA11'
.. seealso:: :func:`hex2web`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
return HEX().get_standard_hex_color(web)
def hex2rgb(hexcolor, normalise=False):
"""This function converts a hex color triplet into RGB
Valid hex code are:
* #FFF
* #0000FF
* 0x0000FF
* 0xFA1
.. doctest::
>>> from colormap.colors import hex2rgb
>>> hex2rgb("#FFF", normalise=False)
(255, 255, 255)
>>> hex2rgb("#FFFFFF", normalise=True)
(1.0, 1.0, 1.0)
.. seealso:: :func:`hex2web`, :func:`web2hex`,
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
hexcolor = HEX().get_standard_hex_color(hexcolor)[1:]
r, g, b = int(hexcolor[0:2], 16), int(hexcolor[2:4], 16), int(hexcolor[4:6], 16)
if normalise:
r, g, b = _normalise(r, g, b)
return r, g, b
def rgb2hex(r, g, b, normalised=False):
"""Convert RGB to hexadecimal color
:param r,g,b: the three R, G, B values, given as separate arguments
:return: a hex version of the RGB 3-tuple
.. doctest::
>>> from colormap.colors import rgb2hex
>>> rgb2hex(0,0,255, normalised=False)
'#0000FF'
>>> rgb2hex(0,0,1, normalised=True)
'#0000FF'
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
if normalised:
r, g, b = _denormalise(r, g, b, mode="rgb")
r = int(r)
g = int(g)
b = int(b)
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
return '#%02X%02X%02X' % (r, g, b)
def rgb2hls(r, g, b, normalised=True):
"""Convert an RGB value to an HLS value.
:param bool normalised: if *normalised* is True, the input RGB triplet
should be in the range 0-1 (0-255 otherwise)
:return: the HLS triplet, with each value in the range 0-1 (the *normalised*
parameter only describes how the input triplet is interpreted).
.. doctest::
>>> from colormap.colors import rgb2hls
>>> rgb2hls(255,255,255, normalised=False)
(0.0, 1.0, 0.0)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`hsv2rgb`,
:func:`hls2rgb`
"""
# rgb_to_hsv expects normalised values !
if normalised:
upper = 1
else:
upper = 255
check_range(r, 0, upper)
check_range(g, 0, upper)
check_range(b, 0, upper)
if normalised==False:
r, g, b = _normalise(r, g, b)
h, l, s = colorsys.rgb_to_hls(r, g, b)
return h, l, s
def rgb2hsv(r, g, b, normalised=True):
"""Convert an RGB value to an HSV value.
:param bool normalised: if *normalised* is True, the input RGB triplet
should be in the range 0-1 (0-255 otherwise)
:return: the HSV triplet, with each value in the range 0-1 (the *normalised*
parameter only describes how the input triplet is interpreted).
.. doctest::
>>> from colormap.colors import rgb2hsv
>>> rgb2hsv(0.5,0,1)
(0.75, 1, 1)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
# rgb_to_hsv expects normalised values !
if normalised:
upper = 1
else:
upper = 255
check_range(r, 0, upper)
check_range(g, 0, upper)
check_range(b, 0, upper)
if normalised==False:
r, g, b = _normalise(r, g, b)
h, s, v = colorsys.rgb_to_hsv(r, g, b)
return h,s,v
def hsv2rgb(h, s, v, normalised=True):
"""Convert a hue-saturation-value (HSV) value to a red-green-blue (RGB).
:param bool normalised: If *normalised* is True, the input HSV triplet
should be in the range 0-1; otherwise, H in the range 0-360 and S and V
in the range 0-100.
:return: the RGB triplet. The output
triplet is in the range 0-1 whether the input is normalised or not.
.. doctest::
>>> from colormap.colors import hsv2rgb
>>> hsv2rgb(0.5,1,1, normalised=True) # doctest: +SKIP
(0, 1, 1)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`rgb2hls`,
:func:`hls2rgb`
.. seealso:: :func:`rgb2hex`
"""
if normalised:
upper = 1
else:
upper = 100
if normalised:
uppera = 1
else:
uppera = 360
check_range(h, 0, uppera)
check_range(s, 0, upper)
check_range(v, 0, upper)
if normalised == False:
h, s, v = _normalise(h, s, v, mode="hsv")
return colorsys.hsv_to_rgb(h, s, v)
def hls2rgb(h, l, s, normalised=True):
"""Convert an HLS value to a RGB value.
:param bool normalised: If *normalised* is True, the input HLS triplet
should be in the range 0-1; otherwise, H in the range 0-360 and LS
in the range 0-100.
:return: the RGB triplet. The output
triplet is in the range 0-1 whether the input is normalised or not.
.. doctest::
>>> from colormap.colors import hls2rgb
>>> hls2rgb(360, 50, 60, normalised=False) # doctest: +SKIP
(0.8, 0.2, 0.2)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
"""
if normalised:
upper = 1
else:
upper = 100
if normalised:
uppera = 1
else:
uppera = 360
check_range(h, 0, uppera)
check_range(s, 0, upper)
check_range(l, 0, upper)
if normalised == False:
h, l, s = _normalise(h, l, s, mode="hls")
return colorsys.hls_to_rgb(h, l, s)
def hex2dec(data):
"""convert hexadecimal string (data) into a float in the [0-65536] inclusive range"""
if data[0] == '#':
data.replace('#', '')
return int(data, 16)/255.
def rgb2yuv(r, g, b):
"""Convert RGB triplet into YUV
:return: YUV triplet with values between 0 and 1
`YUV wikipedia <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 1
.. note:: the constants used follow the Rec. 601 reference
"""
check_range(r, 0, 1)
check_range(g, 0, 1)
check_range(b, 0, 1)
#y = int(0.299 * r + 0.587 * g + 0.114 * b)
#u = int(-0.14713 * r + -0.28886 * g + 0.436 * b)
#v = int(0.615 * r + -0.51499 * g + -0.10001 * b)
y = 0.299 * r + 0.587 * g + 0.114 * b
u = -32591.0/221500.0 * r + -63983.0/221500.0 * g + 0.436 * b
v = 0.615 * r + -72201./140200 * g + -7011/70100. * b
return (y, u, v)
def yuv2rgb(y, u, v):
"""Convert YUV triplet into RGB
`YUV <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 1 (normalised)
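An illustrative round-trip check against :func:`rgb2yuv` (the rounding only
absorbs floating-point error; the two conversions are analytic inverses):
.. doctest::
>>> from colormap.colors import rgb2yuv, yuv2rgb
>>> tuple(round(x, 6) for x in yuv2rgb(*rgb2yuv(0.2, 0.4, 0.6)))
(0.2, 0.4, 0.6)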
"""
check_range(y, 0,1)
check_range(u, 0, 1)
check_range(v, 0, 1)
A, B, C, D = 701.0/615.0, 25251.0/63983.0, 209599.0/361005.0, 443.0/218.0
r = y + A * v
g = y - B * u - C * v
b = y + D * u
return (r, g, b)
def rgb2yuv_int(r, g, b):
"""Convert RGB triplet into YUV
`YUV wikipedia <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 255 (not normalised)
"""
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
y = int(0.299 * r + 0.587 * g + 0.114 * b)
u = int(-32591.0/221500.0 * r + -63983.0/221500.0 * g + 0.436 * b)
v = int(0.615 * r + -72201./140200 * g + -7011/70100. * b)
return (y, u, v)
def yuv2rgb_int(y, u, v):
"""Convert YUV triplet into RGB
`YUV <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 255 (not normalised)
"""
check_range(y, 0, 255)
check_range(u, 0, 255)
check_range(v, 0, 255)
r = int(y + 1.13983 * v)
g = int(y - 0.39465 * u - 0.58060 * v)
b = int(y + 2.03211 * u)
return (r, g, b)
def _denormalise(r, g, b, mode="rgb"):
check_param_in_list(mode, ["rgb", "hls", "hsv"])
if mode == "rgb":
return r*255., g*255., b*255.
elif mode in ["hls", "hsv"]:
return r*360., g*100., b*100.
def _normalise(r, g, b, mode="rgb"):
check_param_in_list(mode, ["rgb", "hls", "hsv"])
if mode == "rgb":
return r/255., g/255., b/255.
elif mode in ["hls", "hsv"]:
return r/360., g/100., b/100.
def to_intensity(n):
"""Return intensity
:param n: value between 0 and 1
:return: value between 0 and 255; round(n*127.5+127.5)
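An illustrative doctest following the formula above:
.. doctest::
>>> from colormap.colors import to_intensity
>>> to_intensity(0), to_intensity(1)
(128, 255)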
"""
check_range(n, 0, 1)
return int(round(n * 127.5 + 127.5))
class HEX(object):
"""Class to check the validity of an hexadecimal string and get standard string
By standard, we mean #FFFFFF (6 digits)
::
>>> h = HEX()
>>> h.is_valid_hex_color("#FFFF00")
True
"""
def __init__(self):
pass
def is_valid_hex_color(self, value, verbose=True):
"""Return True is the string can be interpreted as hexadecimal color
Valid formats are
* #FFF
* #0000FF
* 0x0000FF
* 0xFA1
"""
try:
self.get_standard_hex_color(value)
return True
except Exception as err:
if verbose:
print(err)
return False
def get_standard_hex_color(self, value):
"""Return standard hexadecimal color
By standard, we mean a string that starts with a # sign followed by 6
characters, e.g. #AABBFF
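Illustrative doctests (both accepted input forms shown above):
.. doctest::
>>> HEX().get_standard_hex_color("0xFA1")
'#FFAA11'
>>> HEX().get_standard_hex_color("#10a0b0")
'#10A0B0'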
"""
if isinstance(value, str)==False:
raise TypeError("value must be a string")
if len(value) <= 3:
raise ValueError("input string must be of type 0xFFF, 0xFFFFFF or #FFF or #FFFFFF")
if value.startswith("0x") or value.startswith("0X"):
value = value[2:]
elif value.startswith("#"):
value = value[1:]
else:
raise ValueError("hexa string must start with a '#' sign or '0x' string")
value = value.upper()
# Now, we have either FFFFFF or FFF
# now check the length
for x in value:
if x not in "0123456789ABCDEF":
raise ValueError("Found invalid hexa character {0}".format(x))
if len(value) == 6 or len(value) == 8:
value = "#" + value[0:6]
elif len(value) == 3:
value = "#" + value[0]*2 + value[1]*2 + value[2]*2
else:
raise ValueError("hexa string should be 3, 6 or 8 digits. if 8 digits, last 2 are ignored")
return value
class Color(HEX):
"""Class to ease manipulation and conversion between color codes
You can create an instance in many different ways. You can either use a
human-readable name as long as it is part of the
`XFree86 list <http://en.wikipedia.org/wiki/X11_color_names>`_
You can also provide a hexadecimal string (either 3 or 6 digits). You can
use triplets of values corresponding to the RGB, HSV or HLS conventions.
Here are some examples:
.. doctest::
from colormap import Color
Color("red") # human XFree86 compatible representation
Color("#f00") # standard 3 hex digits
Color("#ff0000") # standard 6 hex digits
Color(hsv=(0,1,0.5))
Color(hls=(0, 1, 0.5)) # HLS triplet
Color(rgb=(1, 0, 0)) # RGB triplet
Color(Color("red")) # using an instance of :class:`Color`
Note that the RGB, HLS and HSV triplets use normalised values. If you need
to normalise the triplet, you can use :func:`colormap.colors._normalise`, which
normalises RGB, HLS and HSV triplets::
colors._normalise(*(255, 255, 0), mode="rgb")
colors._normalise(*(360, 50, 100), mode="hls")
If you provide a string, it has to be a valid string from XFree86.
In addition to the official names, the lower case names are valid. Besides,
there are names with spaces. The equivalent names without space are also
valid. Therefore the name "Spring Green", which is an official name can be
provided as "Spring Green", "spring green", "springgreen" or "SpringGreen".
"""
# Get official color names
colors = XFree86_colors.copy()
# add color names without spaces
aliases = dict([(x.replace(" ", ""),x) for x in colors.keys() if " " in x])
# add color names without spaces in lower cases
aliases.update([(x.replace(" ", "").lower(),x) for x in colors.keys() if " " in x])
# add color names in lower case
aliases.update(dict([(x.lower(),x) for x in colors.keys()]))
aliases.update(dict([(x,x) for x in colors.keys()]))
# keep track of all possible names
color_names = sorted(list(set(list(colors.keys()) +list( aliases.keys()))))
def __init__(self, name=None, rgb=None, hls=None, hsv=None):
super(Color, self).__init__()
self._name = None
self._mode = None
self._rgb = None
# Did the user provide the name argument (first one) as a string?
if isinstance(name, str):
# if so, it can be a valid human name (e.g., red) or an hex
# assuming that valid hexadecimal starts with # or 0x,
# if we can interpret the string as an hexadecimal, we are done
if self.is_valid_hex_color(name, verbose=False):
self.hex = name
else:
# if not, then, the user probably provided a valid color name
# the property will check the validity.
self.name = name[:]
#all other input parameters are ignored
elif name is None:
if rgb:
self.rgb = rgb
elif hls:
self.hls = hls
elif hsv:
self.hsv = hsv
else:
raise ValueError("You must set one of the parameter")
elif isinstance(name, Color):
self.rgb = name.rgb
else:
raise ValueError("name parameter must be a string")
def _get_name(self):
return self._name
def _set_name(self, name):
check_param_in_list(name, self.color_names)
name = self.aliases[name]
self._name = name
# set hex and rgb at the same time based on the name
self.hex = self.colors[name]
name = property(_get_name, _set_name)
color = property(_get_name, _set_name)
def _get_hex(self):
return self._hex
def _set_hex(self, value):
# hex is quantised to 8 bits per channel, so rgb is derived from the stored hex value here
if self.is_valid_hex_color(value):
value = self.get_standard_hex_color(value)
self._hex = value
if self._hex in self.colors.values():
self._name = swapdict(self.colors, check_ambiguity=False)[self._hex]
else:
self._name = "undefined"
self._rgb = hex2rgb(self._hex, normalise=True)
else:
# just to warn the user
self.get_standard_hex_color(value)
hex = property(_get_hex, _set_hex,
doc="getter/setter the hexadecimal value.")
def _get_rgb(self):
return self._rgb
def _set_rgb(self, value):
# set name, hex and rgb
self.hex = rgb2hex(*value, normalised=True)
# must reset rgb with its real value (set_hex may round the rgb)
# in _set_hex
self._rgb = value
rgb = property(_get_rgb, _set_rgb,
doc="getter/setter the RGB values (3-length tuple)")
def _get_hsv(self):
hsv = rgb2hsv(*self.rgb)
return hsv
def _set_hsv(self, value):
# TODO: value must be normalised
self.rgb = hsv2rgb(*value)
hsv = property(_get_hsv, _set_hsv,
doc="getter/setter the HSV values (3-length tuple)")
def _get_hls(self):
hls = rgb2hls(*self.rgb)
return hls
def _set_hls(self, value):
#hls = _normalise(*value, mode="hls")
#else:
hls = value
self.rgb = hls2rgb(*hls)
hls = property(_get_hls, _set_hls,
doc="getter/setter the HLS values (3-length tuple)")
def _get_lightness(self):
return self.hls[1]
def _set_lightness(self, lightness):
h, l, s = self.hls
self.hls = (h, lightness, s)
lightness = property(_get_lightness, _set_lightness,
doc="getter/setter the lightness in the HLS triplet")
def _get_saturation_hls(self):
return self.hls[2]
def _set_saturation_hls(self, saturation):
h, l, s = self.hls
self.hls = (h, l, saturation)
saturation_hls = property(_get_saturation_hls, _set_saturation_hls,
doc="getter/setter the saturation in the HLS triplet")
def _get_hue(self):
return self.hls[0]
def _set_hue(self, hue):
h, l, s = self.hls
self.hls = (hue, l, s)
hue = property(_get_hue, _set_hue,
doc="getter/setter the saturation in the HLS triplet")
def _get_red(self):
return self.rgb[0]
def _set_red(self, red):
r, g, b = self.rgb
self.rgb = (red,g,b)
red = property(_get_red, _set_red,
doc="getter/setter for the red color in RGB triplet")
def _get_green(self):
return self.rgb[1]
def _set_green(self, green):
r, g, b = self.rgb
self.rgb = (r, green, b)
green = property(_get_green, _set_green,
doc="getter/setter for the green color in RGB triplet")
def _get_blue(self):
return self.rgb[2]
def _set_blue(self, blue):
r, g, b = self.rgb
self.rgb = (r, g, blue)
blue = property(_get_blue, _set_blue,
doc="getter/setter for the blue color in RGB triplet")
def _get_value(self):
return self.hsv[2]
def _set_value(self, value):
h, s, v = self.hsv
self.hsv = (h, s, value)
value = property(_get_value, _set_value,
doc="getter/setter the value in the HSV triplet")
def _get_yiq(self):
return colorsys.rgb_to_yiq(*self.rgb)
yiq = property(_get_yiq, doc="Getter for the YIQ triplet")
def __str__(self):
txt = 'Color {0}\n'.format(self.name)
txt+= ' hexa code: {0}\n'.format(self.hex)
txt+= ' RGB code: {0}\n'.format(self.rgb)
txt+= ' RGB code (un-normalised): {0}\n\n'.format([x*255 for x in self.rgb])
txt+= ' HSV code: {0}\n'.format(self.hsv)
txt+= ' HSV code: (un-normalised) {0} {1} {2}\n\n'.format(self.hsv[0]*360, self.hsv[1]*100, self.hsv[2]*100)
txt+= ' HLS code: {0}\n'.format(self.hls)
txt+= ' HLS code: (un-normalised) {0} {1} {2}\n\n'.format(self.hls[0]*360, self.hls[1]*100, self.hls[2]*100)
return txt
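# A short usage sketch (added for illustration, not part of the original
# module): converting between the color representations handled by the
# Color class above; expected values are shown as comments.
def _demo_color_roundtrip():
    c = Color("red")
    print(c.hex)   # expected: '#FF0000'
    print(c.rgb)   # expected: (1.0, 0.0, 0.0)
    c.green = 1.0  # change one channel through its property
    print(c.hex)   # expected: '#FFFF00'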
class Colormap(object):
"""Class to create matplotlib colormap
This example show how to get the pre-defined colormap called *heat*
.. plot::
:include-source:
from pylab import *
from colormap.colors import Colormap
c = Colormap()
cmap = c.get_cmap_heat()
c.test_colormap(cmap)
You may be more interested in building your own colormap::
# design your own colormap
d = {'blue': [0,0,0,1,1,1,0],
'green':[0,1,1,1,0,0,0],
'red': [1,1,0,0,0,1,1]}
cmap = c.cmap(d, reverse=False)
# see the results
c.test_colormap(cmap)
If you want a simple linear colormap, you can use the example above,
or use the :meth:`cmap_linear`. For instance for a diverging colormap
from red to green (with white in between)::
cmap = c.cmap_linear("red", "white", "green")
c.test_colormap(cmap)
Even simpler, you can use a bicolor colormap :meth:`cmap_bicolor`. For instance
for a red to green colormap::
cmap = c.cmap_bicolor("red", "green")
c.test_colormap(cmap)
From matplotlib documentation, colormaps fall into 4 categories:
#. Sequential schemes for unipolar data that progresses from low to high
#. Diverging schemes for bipolar data that emphasizes positive or
negative deviations from a central value
#. Cyclic schemes meant for plotting values that wrap around at the
endpoints, such as phase angle, wind direction, or time of day
#. Qualitative schemes for nominal data that has no inherent ordering,
where color is used only to distinguish categories
:references: matplotlib documentation and examples
http://matplotlib.org/examples/color/colormaps_reference.html
"""
def _get_colormap_mpl(self):
try:
from matplotlib.pyplot import colormaps as _cmaps
return _cmaps()
except:
return []
colormaps = property(_get_colormap_mpl)
def _get_sequentials(self):
return ['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'OrRd',
'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
sequentials = property(_get_sequentials)
def _get_sequentials2(self):
return ['afmhot', 'autumn', 'bone', 'cool', 'copper',
'gist_heat', 'gray', 'hot', 'pink',
'spring', 'summer', 'winter']
sequentials2 = property(_get_sequentials2)
def _get_diverging(self):
return ['BrBG', 'PRGn', 'PiYG', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu',
'RdYlGn', 'Spectral', 'bwr', 'coolwarm', 'seismic']
diverging = property(_get_diverging)
def _get_diverging_black(self):
return ['red_black_sky', 'red_black_blue', 'red_black_green', 'yellow_black_blue',
'yellow_black_sky', 'red_black_orange', 'pink_black_green(w3c)'
]
diverging_black = property(_get_diverging_black)
def _get_qualitative(self):
return ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3']
qualitative = property(_get_qualitative)
def _get_misc(self):
return ['gist_earth', 'terrain', 'ocean', 'gist_stern',
'brg', 'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
misc = property(_get_misc)
def plot_rgb_from_hex_list(self, cols):
"""This functions takes a list of hexadecimal values and plots
the RGB curves. This can be handy to figure out the RGB functions
to be used in the :meth:`cmap`.
.. plot::
:include-source:
:width: 60%
from colormap.colors import Colormap
c = Colormap()
t = ['#FF0000FF', '#FF4D00FF', '#FF9900FF', '#FFE500FF',
'#CCFF00FF', '#80FF00FF', '#33FF00FF', '#00FF19FF',
'#00FF66FF', '#00FFB2FF', '#00FFFFFF', '#00B3FFFF',
'#0066FFFF', '#001AFFFF', '#3300FFFF', '#7F00FFFF',
'#CC00FFFF','#FF00E6FF','#FF0099FF', '#FF004DFF']
c.plot_rgb_from_hex_list(t)
"""
import pylab
red = [hex2rgb(x)[0]/255. for x in cols]
blue = [hex2rgb(x)[2]/255. for x in cols]
green = [hex2rgb(x)[1]/255. for x in cols]
x = pylab.linspace(0, 1, len(cols))
pylab.clf()
pylab.plot(x, red, 'ro-', alpha=0.5)
pylab.plot(x, green, 'gs-', alpha=0.5, markersize=15)
pylab.plot(x, blue, 'bx-', alpha=0.5, markersize=15)
pylab.ylim([-0.1, 1.1])
def cmap_bicolor(self, color1, color2, reverse=False, N=256):
"""Provide 3 colors in format accepted by :class:`Color`
::
>>> red = Color('red')
>>> white = Color('white')
>>> cmap = cmap_bicolor(red, white)
"""
c1 = Color(color1)
c2 = Color(color2)
dico = {'red': [c1.red, c2.red],
'green':[c1.green, c2.green],
'blue':[c1.blue, c2.blue]}
return self.cmap(dico, reverse=reverse, N=N)
def cmap_linear(self, color1, color2, color3, reverse=False, N=256):
"""Provide 3 colors in format accepted by :class:`Color`
::
red = Color('red')
cmap = cmap_linear(red, 'white', '#0000FF')
"""
c1 = Color(color1)
c2 = Color(color2)
c3 = Color(color3)
dico = {'red': [c1.red, c2.red, c3.red],
'green':[c1.green, c2.green, c3.green],
'blue':[c1.blue, c2.blue, c3.blue]}
return self.cmap(dico, reverse=reverse, N=N)
def cmap(self, colors=None, reverse=False, N=256):
"""Return a colormap object to be used within matplotlib
:param dict colors: a dictionary that defines the RGB colors to be
used in the colormap. See :meth:`get_cmap_heat` for an example.
:param bool reverse: reverse the colormap if set to True (defaults to False)
:param int N: number of colors in the colormap (defaults to 256)
"""
# matplotlib colormaps
if colors in self.colormaps:
if reverse and colors.endswith("_r") is False:
colors += "_r"
from matplotlib.cm import get_cmap
return get_cmap(colors)
# custom ones
elif colors in self.diverging_black:
c1, c2, c3 = colors.split("_")
# special case of sky, which does not exist
c3 = c3.replace("sky", "deep sky blue")
return self.cmap_linear(c1, c2, c3)
elif colors == 'heat':
return self.get_cmap_heat()
elif colors == 'heat_r':
return self.get_cmap_heat_r()
# Keep these dependencies inside the function to allow
# installation of colormap without those dependencies
# FIXME remove numpy dependencies
import numpy as np
# extracted from R, heat.colors(20)
if reverse:
for k in colors.keys():
colors[k].reverse()
# If index not given, RGB colors are evenly-spaced in colormap.
index = np.linspace(0, 1, len(colors['red']))
# Adapt color_data to the form expected by LinearSegmentedColormap.
color_data = dict((key, [(x, y, y) for x, y in zip(index, value)])
for key, value in list(colors.items()))
import matplotlib
f = matplotlib.colors.LinearSegmentedColormap
m = f('my_color_map', color_data, N)
return m
def get_cmap_heat(self):
"""Return a heat colormap matplotlib-compatible colormap
This heat colormap should be equivalent to heat.colors() in R.
::
>>> from colormap.colors import Colormap
>>> cmap = Colormap().get_cmap_heat()
You can also rebuild this colormap solely from the RGB
functions below::
d= { 'blue':[0,0,0,0,1],
'green':[0,.35,.7,1,1],
'red':[1,1,1,1,1]}
cmap = Colormap().cmap(d)
"""
return self.cmap(
{ 'blue':[0, 0, 0, 0, 1],
'green':[0, .35, .7, 1, 1],
'red':[1, 1, 1, 1, 1]}, reverse=False)
def get_cmap_heat_r(self):
"""Return a heat colormap matplotlib-compatible colormap
Same as :meth:`get_cmap_heat` but reversed
"""
return self.cmap(
{ 'blue':[0, 0, 0, 0, 1],
'green':[0, .35, .7, 1, 1],
'red':[1, 1, 1, 1, 1]}, reverse=True)
def get_cmap_rainbow(self):
"""colormap similar to rainbow colormap from R
.. note:: The red is actually appearing on both sides... Yet
this looks like what is coded in R 3.0.1
"""
return self.cmap(
{ 'blue': [0, 0, 0, 1, 1, 1, 0],
'green':[0, 1, 1, 1, 0, 0, 0],
'red': [1, 1, 0, 0, 0, 1, 1]}, reverse=False)
def get_cmap_red_green(self):
return self.cmap(
{ 'green': [0, 0.4, 0.6, .75, .8, .9, 1, .9, .8, .6],
'blue' : [0, .4, .6, .75, .8, .7, .6, .35, .17, .1],
'red': [1, 1, 1, 1, 1, .9, .8, .6, .3, .1]}, reverse=True)
def test_colormap(self, cmap=None):
"""plot one colormap for testing
By default, test the :meth:`get_cmap_heat`
"""
if cmap is None:
cmap = self.get_cmap_heat()
import numpy as np
from pylab import clf, pcolor, colorbar, show, linspace, axis
A, B = np.meshgrid(linspace(0, 10, 100), linspace(0, 10, 100))
clf()
pcolor((A-5)**2+(B-5)**2, cmap=cmap)
colorbar()
show()
axis('off')
def plot_colormap(self, cmap_list=None):
"""cmap_list list of valid cmap or name of a set (sequential,
diverging,)
if none, plot all known colors
.. .. plot::
.. :width:80%
.. :include-source:
.. from colormap import Colormap
.. c = Colormap()
.. c.plot_colormap('sequential')
"""
from pylab import subplots
if isinstance(cmap_list, str):
if cmap_list in ['sequentials','sequentials2','qualitative',
'misc','diverging', 'diverging_black']:
cmap_list = getattr(self, cmap_list)
else:
cmap_list = [cmap_list]
if not isinstance(cmap_list, list):
raise TypeError("""input must be a list of strings or a single string. Each string must be a known colormap name. For a user-defined cmap, use test_colormap""")
for this in cmap_list:
if this not in self.colormaps and this not in self.diverging_black:
raise ValueError("unknown colormap name. Please check valid names in colormaps attribute")
nrows = len(cmap_list)
gradient = [x/255. for x in range(0,256)]
gradient = [gradient, gradient]
#np.vstack((gradient, gradient))
fig, axes = subplots(nrows=nrows)
fig.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.8)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=self.cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[2] + 0.08
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='left', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
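# A minimal sketch (added for illustration, not part of the original file)
# showing how the Colormap helpers above can be combined; it assumes
# matplotlib is installed, just as the class itself does.
def _demo_colormap_usage():
    c = Colormap()
    # three-color diverging map, as described in the class docstring
    cmap = c.cmap_linear("red", "white", "green")
    c.test_colormap(cmap)  # opens a matplotlib figure
    # a predefined map can also be fetched by name
    heat = c.cmap("heat")
    return cmap, heat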
| bsd-3-clause |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_chi_square.py | 1 | 1627 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.statistical_based import chi_square
def main():
# load data
mat = scipy.io.loadmat('../data/BASEHOCK.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the chi-square score of each feature
score = chi_square.chi_square(X, y)
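# note: chi_square is evaluated on the full X and y here, so the feature
# ranking below is identical in every fold and is not restricted to the
# training split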
# rank features in descending order according to score
idx = chi_square.feature_ranking(score)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print('Accuracy: {0}'.format(float(correct)/10))
if __name__ == '__main__':
main() | mit |
jarvisqi/nlp_learn | gensim/text.py | 1 | 2054 | import jieba
import pandas as pd
from gensim import corpora, models, similarities
# training samples
raw_documents = [
'0无偿居间介绍买卖毒品的行为应如何定性',
'1吸毒男动态持有大量毒品的行为该如何认定',
'2如何区分是非法种植毒品原植物罪还是非法制造毒品罪',
'3为毒贩贩卖毒品提供帮助构成贩卖毒品罪',
'4将自己吸食的毒品原价转让给朋友吸食的行为该如何认定',
'5为获报酬帮人购买毒品的行为该如何认定',
'6毒贩出狱后再次够买毒品途中被抓的行为认定',
'7虚夸毒品功效劝人吸食毒品的行为该如何认定',
'8妻子下落不明丈夫又与他人登记结婚是否为无效婚姻',
'9一方未签字办理的结婚登记是否有效',
'10夫妻双方1990年按农村习俗举办婚礼没有结婚证 一方可否起诉离婚',
'11结婚前对方父母出资购买的住房写我们二人的名字有效吗',
'12身份证被别人冒用无法登记结婚怎么办?',
'13同居后又与他人登记结婚是否构成重婚罪',
'14未办登记只举办结婚仪式可起诉离婚吗',
'15同居多年未办理结婚登记,是否可以向法院起诉要求离婚'
]
def main():
corpora_documents = []
for item_text in raw_documents:
item_str = list(jieba.cut(item_text))
corpora_documents.append(item_str)
dictionary = corpora.Dictionary(corpora_documents)
corpus = [dictionary.doc2bow(text) for text in corpora_documents]
similarity =similarities.Similarity('-Similarity-index', corpus, num_features=400)
test_data_1 = '你好,我想问一下我想离婚他不想离,孩子他说不要,是六个月就自动生效离婚'
test_cut_raw_1 = jieba.cut(test_data_1)
test_corpus_1 = dictionary.doc2bow(test_cut_raw_1)
similarity.num_best = 5
# return the most similar documents as (index_of_document, similarity) tuples
print(similarity[test_corpus_1])
if __name__ == '__main__':
main()
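# With num_best=5 the query above is expected to return at most five
# (document_index, cosine_similarity) pairs sorted by decreasing similarity,
# e.g. something like [(14, 0.42), (10, 0.38), ...] (values illustrative only).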
| mit |
Pyomo/PyomoGallery | test_notebooks.py | 1 | 4863 | #
# Jupyter notebook testing logic adapted from
# https://gist.github.com/lheagy/f216db7220713329eb3fc1c2cd3c7826
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Lindsey Heagy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Raw
import unittest
import sys
import os
import subprocess
try:
import jupyter
jupyter_available = True
except ImportError:
jupyter_available = False
try:
import pandas
pandas_available = True
except ImportError:
pandas_available = False
try:
import networkx
networkx_available = True
except ImportError:
networkx_available = False
timeout=120
requires_pandas = set(['max_flow_interdict', 'min_cost_flow_interdict', 'multi_commodity_flow_interdict', 'sp_interdict', 'min_cost_flow', 'mst'])
requires_networkx = set(['mst'])
# Testing for the notebooks - use nbconvert to execute all cells of the
# notebook
# For testing on TravisCI, be sure to include a requirements.txt that
# includes jupyter so that you run on the most up-to-date version.
# Where are the notebooks?
TESTDIR = os.path.dirname(os.path.abspath(__file__))
#NBDIR = os.path.sep.join(TESTDIR.split(os.path.sep)[:-2] + ['notebooks/']) # where are the notebooks?
def setUp():
nbpaths = [] # list of notebooks, with file paths
nbnames = [] # list of notebook names (for making the tests)
print(TESTDIR)
# walk the test directory and find all notebooks
for dirname, dirnames, filenames in os.walk(TESTDIR):
for filename in filenames:
if filename.endswith('.ipynb') and not filename.endswith('-checkpoint.ipynb'):
nbpaths.append(os.path.abspath(dirname) + os.path.sep + filename) # get abspath of notebook
nbnames.append(''.join(filename[:-6])) # strip off the file extension
return nbpaths, nbnames
def get(nbname, nbpath):
# use nbconvert to execute the notebook
def test_func(self):
print('\n--------------- Testing {0} ---------------'.format(nbname))
print(' {0}'.format(nbpath))
if not jupyter_available:
self.skipTest("Jupyter unavailable")
if nbname in requires_pandas and not pandas_available:
self.skipTest("Pandas unavailable")
if nbname in requires_networkx and not networkx_available:
self.skipTest("Networkx unavailable")
# execute the notebook using nbconvert to generate html
dir_=os.path.dirname(nbpath)
os.chdir(dir_)
nbexe = subprocess.Popen(
[ 'jupyter', 'nbconvert', '{0}'.format(nbpath),
'--execute',
'--inplace',
'--ExecutePreprocessor.kernel_name=python%s' % (
{2:"",3:"3"}[sys.version_info[0]], ),
'--ExecutePreprocessor.timeout='+str(timeout)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = nbexe.communicate()
check = nbexe.returncode
if check == 0:
print('\n ..... {0} Passed ..... \n'.format(nbname))
# if passed remove the generated html file
#subprocess.call(['rm', '{0}.html'.format( os.path.sep.join(os.getcwd().split(os.path.sep) + [nbpath.split(os.path.sep)[-1][:-6]]))])
else:
print('\n <<<<< {0} FAILED >>>>> \n'.format(nbname))
print('Captured Output: \n {0}'.format(err))
self.assertEqual(check, 0)
return test_func
class TestNotebooks(unittest.TestCase):
pass
nbpaths, nbnames = setUp()
# Check for duplicates
tmp = set()
for name in nbnames:
if name in tmp:
raise IOError("ERROR: duplicate test name %s" % name)
tmp.add(name)
# build test for each notebook
for i, nb in enumerate(nbnames):
#print((i,nb,nbpaths[i]))
setattr(TestNotebooks, 'test_'+nb, get(nb, nbpaths[i]))
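# The loop above attaches one test method per notebook, so a single notebook
# can be run on its own with the standard unittest selector, e.g.
#   python test_notebooks.py TestNotebooks.test_<notebook_name>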
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
manazhao/tf_recsys | tensorflow/examples/learn/multiple_gpu.py | 13 | 4153 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/gpu:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/gpu:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3)
# and with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
scikit-garden/scikit-garden | skgarden/quantile/tests/test_tree.py | 1 | 2933 | import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from numpy.testing import assert_array_almost_equal
from skgarden.quantile import DecisionTreeQuantileRegressor
from skgarden.quantile import ExtraTreeQuantileRegressor
from skgarden.quantile.utils import weighted_percentile
boston = load_boston()
X, y = boston.data, boston.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.6, test_size=0.4, random_state=0)
X_train = np.array(X_train, dtype=np.float32)
X_test = np.array(X_test, dtype=np.float32)
estimators = [
DecisionTreeQuantileRegressor(random_state=0),
ExtraTreeQuantileRegressor(random_state=0)
]
def test_quantiles():
# Test with max depth 1.
for est in estimators:
est.set_params(max_depth=1)
est.fit(X_train, y_train)
tree = est.tree_
for q in [20, 40, 50, 60, 80, 90]:
left_ind = X_train[:, tree.feature[0]] <= tree.threshold[0]
right_ind = X_train[:, tree.feature[0]] > tree.threshold[0]
# fixme
left_q = weighted_percentile(y_train[left_ind], q)
right_q = weighted_percentile(y_train[right_ind], q)
for curr_X, curr_y in [[X_train, y_train], [X_test, y_test]]:
actual_q = np.zeros(curr_X.shape[0])
left_ind = curr_X[:, tree.feature[0]] <= tree.threshold[0]
actual_q[left_ind] = left_q
right_ind = curr_X[:, tree.feature[0]] > tree.threshold[0]
actual_q[right_ind] = right_q
expected_q = est.predict(curr_X, quantile=q)
assert_array_almost_equal(expected_q, actual_q)
def test_max_depth_None():
# Since each leaf is pure and has just one unique value.
# the mean equals any quantile.
for est in estimators:
est.set_params(max_depth=None)
est.fit(X_train, y_train)
for quantile in [20, 40, 50, 60, 80, 90]:
for curr_X in [X_train, X_test]:
assert_array_almost_equal(
est.predict(curr_X, quantile=None),
est.predict(curr_X, quantile=quantile), 1)
def test_tree_toy_data():
rng = np.random.RandomState(0)
x1 = rng.randn(1, 10)
X1 = np.tile(x1, (10000, 1))
x2 = 20.0 * rng.randn(1, 10)
X2 = np.tile(x2, (10000, 1))
X = np.vstack((X1, X2))
y1 = rng.randn(10000)
y2 = 5.0 + rng.randn(10000)
y = np.concatenate((y1, y2))
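    # The toy data is built so that all rows of X1 equal x1 and all rows of X2
    # equal x2, with y1 ~ N(0, 1) and y2 ~ N(5, 1); a depth-1 tree therefore
    # separates the two groups and each leaf's quantile prediction should match
    # np.percentile of y1 or y2, as asserted below.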
for est in estimators:
est.set_params(max_depth=1)
est.fit(X, y)
for quantile in [20, 30, 40, 50, 60, 70, 80]:
assert_array_almost_equal(
est.predict(x1, quantile=quantile),
[np.percentile(y1, quantile)], 3)
assert_array_almost_equal(
est.predict(x2, quantile=quantile),
[np.percentile(y2, quantile)], 3)
| bsd-3-clause |
sonusz/PhasorToolBox | examples/freq_meter.py | 1 | 1820 | #!/usr/bin/env python3
"""
This is an real-time frequency meter of two PMUs.
This code connects to two PMUs, plot the frequency of the past 300 time-stamps and update the plot in real-time.
"""
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import gc
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
del(synchrophasors)
gc.collect()
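    # With history=300 and the two clients configured below, each callback
    # receives a buffer of the 300 most recent time-aligned message sets, one
    # message per PMU, which the loop above unpacks into the two y_data series.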
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
| mit |
subutai/htmresearch | projects/sequence_prediction/continuous_sequence/data/processTaxiData.py | 12 | 2451 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from pygeocoder import Geocoder
plt.ion()
year = 2015
record_num = []
aggregation_rule = {'Sum': sum}
ts_all = None
aggregation_window = "1min"
print " aggregate data at" + aggregation_window + "resolution"
for year in [2014, 2015]:
for month in xrange(1, 13):
datafileName = 'yellow_tripdata_' + str(year) + '-' + "{:0>2d}".format(month) + '.csv'
if os.path.isfile(datafileName):
print " Load Datafile: ", datafileName
# df = pd.read_csv(datafileName, header=0, nrows=100, usecols=[1, 3, 5, 6],
# names=['pickup_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude'])
#
# postcode = np.zeros(len(df))
# for i in xrange(len(df)):
# try:
# results = Geocoder.reverse_geocode(df['pickup_latitude'][i], df['pickup_longitude'][i])
# postcode[i] = results.postal_code
# except:
# pass
df = pd.read_csv(datafileName, header=0, usecols=[1, 3], names=['pickup_datetime', 'passenger_count'])
record_num.append(len(df))
ts = pd.Series(np.array(df.passenger_count), index=pd.to_datetime(df.pickup_datetime))
del df
ts_aggregate = ts.resample(aggregation_window, how=aggregation_rule)
if ts_all is not None:
print " concat ts_all"
ts_all = pd.concat([ts_all, ts_aggregate])
else:
print " initialize ts_all"
ts_all = ts_aggregate
else:
print datafileName, " not exist"
print "include time of day and day of week as input field"
date = ts_all.index
dayofweek = (date.dayofweek)
timeofday = (date.hour*60 + date.minute)
passenger_count = np.array(ts_all['Sum'])
seq = pd.DataFrame(np.transpose(np.array([passenger_count, timeofday, dayofweek])), columns=['passenger_count', 'timeofday', 'dayofweek'], index=ts_all.index)
plt.close('all')
plt.figure(1)
plt.plot(seq.index, seq.passenger_count)
import csv
outputFileName = "nyc_taxi_" + aggregation_window + ".csv"
outputFile = open(outputFileName,"w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(['timestamp', 'passenger_count', 'timeofday', 'dayofweek'])
csvWriter.writerow(['datetime', 'int', 'int', 'string'])
csvWriter.writerow(['T', '', '', ''])
for i in range(len(ts_all)):
csvWriter.writerow([seq.index[i], seq.passenger_count[i], seq.timeofday[i], seq.dayofweek[i]])
outputFile.close() | agpl-3.0 |
mikebenfield/scikit-learn | sklearn/cluster/tests/test_spectral.py | 72 | 7950 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
fishroot/nemoa | nemoa/file/nplot.py | 1 | 16627 | # -*- coding: utf-8 -*-
"""Common function for creating plots with matplotlib."""
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'
__docformat__ = 'google'
import numpy as np
from nemoa.types import OptDict
class Plot:
"""Base class for matplotlib plots.
Export classes such as Histogram, Heatmap and Graph share a common
interface to matplotlib, as well as certain plotting attributes. This
base class provides a unified interface to access matplotlib and those
attributes.
"""
_default: dict = {
'fileformat': 'pdf',
'figure_size': (10.0, 6.0),
'dpi': None,
'bg_color': 'none',
'usetex': False,
'font_family': 'sans-serif',
'style': 'seaborn-white',
'title': None,
'show_title': True,
'title_fontsize': 14.0
}
_config: dict = {}
_kwds: dict = {}
_plt = None
_fig = None
_axes = None
def __init__(self, **kwds):
""" """
try:
import matplotlib
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
# merge config from defaults, current config and keyword arguments
self._kwds = kwds
self._config = {**self._default, **self._config, **kwds}
# update global matplotlib settings
matplotlib.rc('text', usetex=self._config['usetex'])
matplotlib.rc('font', family=self._config['font_family'])
# link matplotlib.pyplot
import matplotlib.pyplot as plt
self._plt = plt
# close previous figures
plt.close('all')
# update plot settings
plt.style.use(self._config['style'])
# create figure
self._fig = plt.figure(
figsize=self._config['figure_size'],
dpi=self._config['dpi'],
facecolor=self._config['bg_color'])
# create subplot (matplotlib.axes.Axes)
self._axes = self._fig.add_subplot(111)
def set_default(self, config: OptDict = None) -> bool:
"""Set default values."""
self._config = {**self._config, **(config or {}), **self._kwds}
return True
def plot_title(self) -> bool:
"""Plot title."""
if not self._config['show_title']:
return False
title = self._config['title'] or 'Unknown'
fontsize = self._config['title_fontsize']
getattr(self._plt, 'title')(title, fontsize=fontsize)
return True
def show(self) -> None:
"""Show plot."""
getattr(self._plt, 'show')()
def save(self, path, **kwds):
"""Save plot to file."""
return self._fig.savefig(path, dpi=self._config['dpi'], **kwds)
def release(self):
"""Clear current plot."""
return self._fig.clear()
class Heatmap(Plot):
""" """
_config = {
'interpolation': 'nearest',
'grid': True
}
def plot(self, array):
""" """
try:
from matplotlib.cm import hot_r
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
# plot grid
self._axes.grid(self._config['grid'])
# plot heatmap
cax = self._axes.imshow(
array,
cmap=hot_r,
interpolation=self._config['interpolation'],
extent=(0, array.shape[1], 0, array.shape[0]))
# create labels for axis
max_font_size = 12.
x_labels = []
for label in self._config['x_labels']:
if ':' in label:
label = label.split(':', 1)[1]
x_labels.append(get_texlabel(label))
y_labels = []
for label in self._config['y_labels']:
if ':' in label:
label = label.split(':', 1)[1]
y_labels.append(get_texlabel(label))
fontsize = min(max_font_size, \
400. / float(max(len(x_labels), len(y_labels))))
self._plt.xticks(
np.arange(len(x_labels)) + 0.5,
tuple(x_labels), fontsize=fontsize, rotation=65)
self._plt.yticks(
len(y_labels) - np.arange(len(y_labels)) - 0.5,
tuple(y_labels), fontsize=fontsize)
# create colorbar
cbar = self._fig.colorbar(cax)
for tick in cbar.ax.get_yticklabels():
tick.set_fontsize(9)
# (optional) plot title
self.plot_title()
return True
class Histogram(Plot):
""" """
_config = {
'bins': 100,
'facecolor': 'lightgrey',
'edgecolor': 'black',
'histtype': 'bar',
'linewidth': 0.5,
'grid': True
}
def plot(self, array):
""" """
# plot grid
self._axes.grid(self._config['grid'])
# plot histogram
self._axes.hist(
array,
bins=self._config['bins'],
facecolor=self._config['facecolor'],
histtype=self._config['histtype'],
linewidth=self._config['linewidth'],
edgecolor=self._config['edgecolor'])
# (optional) plot title
self.plot_title()
return True
class Scatter2D(Plot):
""" """
_config = {
'grid': True,
'pca': True
}
@staticmethod
def _pca2d(array):
"""Calculate projection to largest two principal components."""
# get dimension of array
dim = array.shape[1]
# calculate covariance matrix
cov = np.cov(array.T)
# calculate eigevectors and eigenvalues
vals, vecs = np.linalg.eig(cov)
# sort eigevectors by absolute eigenvalues
pairs = [(np.abs(vals[i]), vecs[:, i]) for i in range(len(vals))]
pairs.sort(key=lambda x: x[0], reverse=True)
# calculate projection matrix
proj = np.hstack(
[pairs[0][1].reshape(dim, 1), pairs[1][1].reshape(dim, 1)])
# calculate projection
parray = np.dot(array, proj)
return parray
def plot(self, array):
""" """
# test arguments
if array.shape[1] != 2:
if self._config['pca']:
array = self._pca2d(array)
else: raise TypeError(
"first argument is required to be an array of shape (n, 2)")
x, y = array[:, 0], array[:, 1]
# plot grid
self._axes.grid(self._config['grid'])
# plot scattered data
self._axes.scatter(x, y)
# (optional) plot title
self.plot_title()
return True
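# A small sketch (added for illustration, not part of the original module)
# exercising the PCA helper above; the name _demo_pca2d is assumed here.
def _demo_pca2d():
    rng = np.random.RandomState(0)
    data = rng.randn(100, 5)
    projected = Scatter2D._pca2d(data)
    # the projection keeps all samples and reduces the features to the two
    # largest principal components
    assert projected.shape == (100, 2)
    return projected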
class Graph(Plot):
_config = {
'padding': (0.1, 0.1, 0.1, 0.1),
'show_legend': False,
'legend_fontsize': 9.0,
'graph_layout': 'layer',
'graph_direction': 'right',
'node_style': 'o',
'edge_width_enabled': True,
'edge_curvature': 1.0
}
def plot(self, G):
"""Plot graph.
Args:
G: networkx graph instance
figure_size (tuple): figure size in inches
(11.69,8.27) for A4, (16.53,11.69) for A3
edge_attribute (string): name of edge attribute, that
determines the edge colors by its sign and the edge width
by its absolute value.
default: 'weight'
edge_color (bool): flag for colored edges
True: edge colors are determined by the sign of the
attribute 'weight'
False: edges are black
edge_poscolor (string): name of color for edges with
positive signed attribute. For a full list of specified
color names see nemoa.base.nplot.get_color()
edge_negcolor (string): name of color for edges with
negative signed attribute. For a full list of specified
color names see nemoa.base.nplot.get_color()
edge_curvature (float): value within the intervall [-1, 1],
that determines the curvature of the edges.
Thereby 1 equals max convexity and -1 max concavity.
direction (string): string within the list ['up', 'down',
'left', 'right'], that dermines the plot direction of the
graph. 'up' means, the first layer is at the bottom.
edge_style (string): '-', '<-', '<->', '->',
'<|-', '<|-|>', '-|>', '|-', '|-|', '-|',
']-', ']-[', '-[', 'fancy', 'simple', 'wedge'
Returns:
Boolen value which is True if no error occured.
"""
try:
import matplotlib.patches
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
try:
import networkx as nx
except ImportError as err:
raise ImportError(
"requires package networkx: "
"https://networkx.github.io") from err
from nemoa.base import ndict
from nemoa.math import graph
# adjust size of subplot
fig = self._fig
ax = self._axes
ax.set_autoscale_on(False)
figsize = fig.get_size_inches() * fig.dpi
ax.set_xlim(0., figsize[0])
ax.set_ylim(0., figsize[1])
ax.set_aspect('equal', 'box')
ax.axis('off')
# get node positions and sizes
layout_params = ndict.crop(self._config, 'graph_')
del layout_params['layout']
pos = graph.get_layout(
G, layout=self._config['graph_layout'], size=figsize,
padding=self._config['padding'], **layout_params)
sizes = graph.get_layout_normsize(pos)
node_size = sizes.get('node_size', None)
node_radius = sizes.get('node_radius', None)
line_width = sizes.get('line_width', None)
edge_width = sizes.get('edge_width', None)
font_size = sizes.get('font_size', None)
# get nodes and groups sorted by node attribute group_id
groups = graph.get_groups(G, attribute='group')
sorted_groups = sorted(
list(groups.keys()),
key=lambda g: 0 if not isinstance(g, list) or not g \
else G.node.get(g[0], {}).get('group_id', 0))
# draw nodes, labeled by groups
for group in sorted_groups:
gnodes = groups.get(group, [])
if not gnodes:
continue
refnode = G.node.get(gnodes[0])
label = refnode['description'] or refnode['group'] or str(group)
# draw nodes in group
node_obj = nx.draw_networkx_nodes(
G, pos, nodelist=gnodes, linewidths=line_width,
node_size=node_size, node_shape=self._config['node_style'],
node_color=get_color(refnode['color'], 'white'), label=label)
node_obj.set_edgecolor(
get_color(refnode['border_color'], 'black'))
# draw node labels
for node, data in G.nodes(data=True):
# determine label, fontsize and color
node_label = data.get('label', str(node).title())
node_label_format = get_texlabel(node_label)
node_label_size = np.sqrt(get_texlabel_width(node_label))
font_color = get_color(data['font_color'], 'black')
# draw node label
nx.draw_networkx_labels(
G, pos, labels={node: node_label_format},
font_size=font_size / node_label_size, font_color=font_color,
font_family='sans-serif', font_weight='normal')
# patch node for edges
circle = matplotlib.patches.Circle(
pos.get(node), alpha=0., radius=node_radius)
ax.add_patch(circle)
G.node[node]['patch'] = circle
# draw edges
seen = {}
if graph.is_directed(G):
default_edge_style = '-|>'
else: default_edge_style = '-'
for (u, v, data) in G.edges(data=True):
weight = data['weight']
if weight == 0.:
continue
# calculate edge curvature from node positions
# parameter rad describes the height in the normalized triangle
if (u, v) in seen:
rad = seen.get((u, v))
rad = -(rad + float(np.sign(rad)) * .2)
else:
scale = 1. / np.amax(np.array(figsize))
vec = scale * (np.array(pos[v]) - np.array(pos[u]))
rad = vec[0] * vec[1] / np.sqrt(2 * np.sum(vec ** 2))
if self._config['graph_layout'] == 'layer':
gdir = self._config['graph_direction']
if gdir in ['left', 'right']:
rad *= -1
seen[(u, v)] = rad
# determine style of edge from edge weight
if weight is None:
linestyle = '-'
linewidth = 0.5 * edge_width
alpha = 0.5
elif not self._config['edge_width_enabled']:
linestyle = '-'
linewidth = edge_width
alpha = np.amin([np.absolute(weight), 1.0])
else:
linestyle = '-'
linewidth = np.absolute(weight) * edge_width
alpha = np.amin([np.absolute(weight), 1.0])
# draw edge
node_a = G.node[u]['patch']
node_b = G.node[v]['patch']
arrow = matplotlib.patches.FancyArrowPatch(
posA=node_a.center, posB=node_b.center,
patchA=node_a, patchB=node_b,
arrowstyle=default_edge_style,
connectionstyle='arc3,rad=%s' % rad,
mutation_scale=linewidth * 12.,
linewidth=linewidth, linestyle=linestyle,
color=get_color(data.get('color', 'black')), alpha=alpha)
ax.add_patch(arrow)
# (optional) draw legend
if self._config['show_legend']:
num_groups = np.sum([1 for g in list(groups.values()) \
if isinstance(g, list) and g])
markerscale = 0.6 * self._config['legend_fontsize'] / font_size
ax.legend(
numpoints=1,
loc='lower center',
ncol=num_groups,
borderaxespad=0.,
framealpha=0.,
bbox_to_anchor=(0.5, -0.1),
fontsize=self._config['legend_fontsize'],
markerscale=markerscale)
# (optional) plot title
self.plot_title()
return True
def get_color(*args):
"""Convert color name of XKCD color name survey to RGBA tuple.
Args:
List of color names. If the list is empty, a full list of
available color names is returned. Otherwise the first valid
color in the list is returned as RGBA tuple. If no color is
valid None is returned.
"""
try:
from matplotlib import colors
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
if not args:
clist = list(colors.get_named_colors_mapping().keys())
return sorted([cname[5:].title() \
for cname in clist if cname[:5] == 'xkcd:'])
rgb = None
for cname in args:
try:
rgb = colors.to_rgb('xkcd:%s' % cname)
break
except ValueError:
continue
return rgb
def get_texlabel(string):
"""Return formated node label as used for plots."""
lstr = string.rstrip('1234567890')
if len(lstr) == len(string):
return '${%s}$' % (string)
rnum = int(string[len(lstr):])
lstr = lstr.strip('_')
return '${%s}_{%i}$' % (lstr, rnum)
def get_texlabel_width(string):
"""Return estimated width for formated node labels."""
lstr = string.rstrip('1234567890')
if len(lstr) == len(string):
return len(string)
lstr = lstr.strip('_')
rstr = str(int(string[len(lstr):]))
return len(lstr) + 0.7 * len(rstr)
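# A brief sketch (added for illustration) of the label helpers above;
# expected outputs are shown as comments.
def _demo_texlabels():
    print(get_texlabel("weight12"))        # expected: '${weight}_{12}$'
    print(get_texlabel("alpha"))           # expected: '${alpha}$'
    print(get_texlabel_width("weight12"))  # expected: 7.4 (6 + 0.7 * 2)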
def filetypes():
"""Return supported image filetypes."""
try:
import matplotlib.pyplot as plt
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
return plt.gcf().canvas.get_supported_filetypes()
| gpl-3.0 |
e-koch/VLA_Lband | 14B-088/HI/imaging/sd_regridding/sd_comparison.py | 1 | 3520 |
'''
Compare the regridded versions of the SD datasets.
'''
from spectral_cube import SpectralCube
import matplotlib.pyplot as plt
import os
from corner import hist2d
from radio_beam import Beam
import astropy.units as u
import numpy as np
from paths import fourteenB_HI_data_path, data_path
from galaxy_params import gal
# Load in the 4 cubes and run.
vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
arecibo_path = os.path.join(data_path, "Arecibo")
# Spectral interpolation, followed by reprojection.
arecibo_name = \
os.path.join(arecibo_path,
"14B-088_items_new/m33_arecibo_14B088.fits")
arecibo_cube = SpectralCube.read(arecibo_name)
ebhis_path = os.path.join(data_path, "EBHIS")
# Spectral interpolation, followed by reprojection.
ebhis_name = os.path.join(ebhis_path, "14B-088_items/m33_ebhis_14B088.fits")
ebhis_cube = SpectralCube.read(ebhis_name)
gbt_path = os.path.join(data_path, "GBT")
gbt_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088.fits")
gbt_cube = SpectralCube.read(gbt_name)
gbt_lowres_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088.fits")
gbt_lowres_cube = SpectralCube.read(gbt_lowres_name)
# Compare total emission in the cubes.
vla_mask = np.isfinite(vla_cube[0])
arecibo_sum = arecibo_cube.with_mask(vla_mask).sum()
ebhis_sum = ebhis_cube.with_mask(vla_mask).sum()
gbt_sum = gbt_cube.with_mask(vla_mask).sum()
gbt_lowres_sum = gbt_lowres_cube.with_mask(vla_mask).sum()
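# The single-dish sums above are restricted to the finite VLA footprint
# (vla_mask), so the four totals can be compared over a common region.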
plt.plot(arecibo_sum, ebhis_sum, gbt_sum, gbt_lowres_sum)
# Compare intensities in one plane
# arecibo_plane = arecibo_cube[500]
# ebhis_plane = ebhis_cube[500]
# gbt_plane = gbt_cube[500]
# gbt_plane[np.isnan(gbt_plane)] = 0.0 * u.K
# gbt_lowres_plane = gbt_lowres_cube[500]
# # Convolve GBT to match EBHIS
# beam_fwhm = lambda diam: ((1.2 * 21 * u.cm) / diam.to(u.cm)) * u.rad
# gbt_90m_beam = Beam(beam_fwhm(90 * u.m))
# gbt_plane._beam = gbt_90m_beam
# gbt_plane_convolved = gbt_plane.convolve_to(ebhis_plane.beam)
# gbt_100m_beam = Beam(beam_fwhm(100 * u.m))
# gbt_plane._beam = gbt_100m_beam
# gbt_plane_convolved_100 = gbt_plane.convolve_to(ebhis_plane.beam)
# ax = plt.subplot(131)
# hist2d(gbt_plane.value.ravel(), ebhis_plane.value.ravel(), ax=ax)
# plt.plot([0, 15], [0, 15])
# ax2 = plt.subplot(132)
# hist2d(gbt_plane_convolved.value.ravel(), ebhis_plane.value.ravel(), ax=ax2)
# plt.plot([0, 15], [0, 15])
# ax3 = plt.subplot(133)
# hist2d(gbt_plane_convolved_100.value.ravel(), ebhis_plane.value.ravel(), ax=ax3)
# plt.plot([0, 15], [0, 15])
# Best match for GBT is with a 106 m beam, convolved to the 80 m of EBHIS.
# Well, something is wrong here. It has to be that the difference between the
# data is a 80 m deconvolved w/ a 106 m beam. The EBHIS beam size should then
# be slightly smaller?
# Now convolve the Arecibo down to the GBT.
# gbt_90m_beam = Beam(beam_fwhm(90 * u.m))
# arecibo_plane_convolved = arecibo_plane.convolve_to(gbt_90m_beam)
# gbt_100m_beam = Beam(beam_fwhm(100 * u.m))
# arecibo_plane_convolved_100 = arecibo_plane.convolve_to(gbt_100m_beam)
# ax = plt.subplot(131)
# hist2d(arecibo_plane.value.ravel(), gbt_plane.value.ravel(), ax=ax)
# plt.plot([0, 15], [0, 15])
# ax2 = plt.subplot(132)
# hist2d(arecibo_plane_convolved.value.ravel(), gbt_plane.value.ravel(), ax=ax2)
# plt.plot([0, 15], [0, 15])
# ax3 = plt.subplot(133)
# hist2d(arecibo_plane_convolved_100.value.ravel(), gbt_plane.value.ravel(), ax=ax3)
# plt.plot([0, 15], [0, 15]) | mit |
winklerand/pandas | pandas/io/formats/printing.py | 7 | 8864 | """
printing tools
"""
import sys
from pandas.core.dtypes.inference import is_sequence
from pandas import compat
from pandas.compat import u
from pandas.core.config import get_option
def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str which are being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
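# A short sketch (added for illustration, not part of pandas) of the padding
# helper above; expected outputs are shown as comments.
def _demo_justify():
    print(justify(['a', 'bb'], 4, mode='left'))    # ['a   ', 'bb  ']
    print(justify(['a', 'bb'], 4, mode='center'))  # [' a  ', ' bb ']
    print(justify(['a', 'bb'], 4))                 # ['   a', '  bb'] (right)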
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather then rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can handled by other
# parts of the package without breakage.
#
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in option "display.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("{{{body}}}")
else:
fmt = u("[{body}]") if hasattr(seq, '__setitem__') else u("({body})")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(
next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt.format(body=body)
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
"""
fmt = u("{{{things}}}")
pairs = []
pfmt = u("{key}: {val}")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs))
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = u("'{thing}'")
else:
fmt = u("u'{thing}'")
result = fmt.format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
def _enable_data_resource_formatter(enable):
if 'IPython' not in sys.modules:
# definitely not in IPython
return
from IPython import get_ipython
ip = get_ipython()
if ip is None:
# still not in IPython
return
formatters = ip.display_formatter.formatters
mimetype = "application/vnd.dataresource+json"
if enable:
if mimetype not in formatters:
# define tableschema formatter
from IPython.core.formatters import BaseFormatter
class TableSchemaFormatter(BaseFormatter):
print_method = '_repr_data_resource_'
_return_type = (dict,)
# register it:
formatters[mimetype] = TableSchemaFormatter()
# enable it if it's been disabled:
formatters[mimetype].enabled = True
else:
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
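# A sketch of how the helper above is used later in this file: any console
# output produced inside the block is silently discarded.
# with no_stdout_stderr():
#     TheilSenRegressor(verbose=True, random_state=0).fit(X, y)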
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
transientlunatic/minke | minke/mdctools.py | 1 | 34706 | """
88b d88 88 88
888b d888 "" 88
88`8b d8'88 88
88 `8b d8' 88 88 8b,dPPYba, 88 ,d8 ,adPPYba,
88 `8b d8' 88 88 88P' `"8a 88 ,a8" a8P_____88
88 `8b d8' 88 88 88 88 8888[ 8PP"""""""
88 `888' 88 88 88 88 88`"Yba, "8b, ,aa
88 `8' 88 88 88 88 88 `Y8a `"Ybbd8"'
--------------------------------------------------------
This file is a part of Minke, a tool for generating simulated
gravitational wave signals, used for characterising and training
search algorithms.
Minke was created by Daniel Williams, based on work started by Chris
Pankow and others, and is built around the LALSimulation library.
"""
from glue.ligolw import ligolw, utils, lsctables
lsctables.use_in(ligolw.LIGOLWContentHandler);
import numpy
import lalburst, lalsimulation, lalmetaio
from minke.antenna import response
from lal import TimeDelayFromEarthCenter as XLALTimeDelayFromEarthCenter
#from pylal.xlal.datatypes.ligotimegps import LIGOTimeGPS
from lal import LIGOTimeGPS
from glue.ligolw.utils import process
import glue
import glue.ligolw
import gzip
import lal, lalframe
import numpy as np
import pandas as pd
import os
import os.path
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import random
import minke
from minke import sources
sourcemap = {}
for classin in dir(sources):
classin = sources.__dict__[classin]
if hasattr(classin, "waveform"):
sourcemap[classin.waveform] = classin
def source_from_row(row):
waveform = row.waveform
sourceobj = sourcemap[row.waveform].__new__(sourcemap[row.waveform])
sourceobj.numrel_data = str("")
params = {}
for attr in dir(row):
if not attr[0] == "_" and not attr[:3] =="get":
#print attr
try:
params[attr] = getattr(row, attr)
setattr(sourceobj, attr, getattr(row, attr))
except AttributeError:
print("Error processing the {} column".format(attr))
sourceobj.params = params
try:
sourceobj.time = row.time_geocent_gps
except:
sourceobj.time = row.geocent_start_time
pass
return sourceobj
def source_from_dict(params):
    sourceobj = sourcemap[params['morphology']].__new__(sourcemap[params['morphology']])
    sourceobj.numrel_data = str("")
    for attr, value in params.items():
        if not attr[0] == "_" and not attr[:3] == "get":
            setattr(sourceobj, attr, value)
    sourceobj.params = params
    try:
        sourceobj.time = params['time_geocent_gps']
    except KeyError:
        sourceobj.time = params.get('geocent_start_time')
    return sourceobj
table_types = {
# Ad-Hoc
"ga" : lsctables.SimBurstTable,
"sg" : lsctables.SimBurstTable,
"wnb" : lsctables.SimBurstTable,
"sc" : lsctables.SimBurstTable,
# Supernova Families
"d08" : lsctables.SimBurstTable,
"s10" : lsctables.SimBurstTable,
"m12" : lsctables.SimBurstTable,
"o13" : lsctables.SimBurstTable,
"y10" : lsctables.SimBurstTable,
# Long Duration
"adi" : lsctables.SimBurstTable,
# Ringdown
"rng" : lsctables.SimRingdownTable,
"gng" : lsctables.SimRingdownTable,
}
tables = {
"burst" : lsctables.SimBurstTable,
"ringdown" : lsctables.SimRingdownTable
}
def mkdir(path):
"""
Make all of the tree of directories in a given path if they don't
already exist.
Parameters
----------
path : str
The path to the desired directory.
"""
sub_path = os.path.dirname(path)
if not os.path.exists(sub_path):
mkdir(sub_path)
if not os.path.exists(path):
os.mkdir(path)
class TableTypeError(Exception):
pass
class MDCSet():
inj_families_names = {'ga' : 'Gaussian',
'sg' : 'SineGaussian',
'wnb': 'BTLWNB',
"sc" : "StringCusp",
# Supernova families
'd08' : 'Dimmelmeier+08',
's10' : 'Scheidegger+10',
'm12' : 'Mueller+12',
'o13' : 'Ott+13',
'y10' : "Yakunin+10",
# Long-duration
'adi' : 'ADI',
# Ringdown
'rng' : "BBHRingdown",
'gng' : "GenericRingdown",
}
inj_families_abb = dict((v,k) for k,v in list(inj_families_names.items()))
hist_parameters = {
"StringCusp": ["amplitude", "ra", "dec"],
"SineGaussian": ["hrss", "psi", "ra", "dec"],
"Gaussian": ["hrss", "psi", "ra", "dec"],
"BTLWNB": ["hrss", "ra", "dec"],
"Dimmelmeier+08": ['hrss', 'ra', 'dec']
}
waveforms = []
def __init__(self, detectors, name='MDC Set', table_type = "burst"):
"""
Represents an MDC set, stored in an XML SimBurstTable file.
Parameters
----------
detectors : list
A list of detector names where the injections should be made.
name : str
A name for the MDC Set. Defaults to 'MDC Set'.
table_type : str
The type of table which should be generated. Default is `burst`,
which generates a SimBurstTable.
"""
self.detectors = detectors
self.waveforms = []
self.strains = []
self.egw = []
self.times = []
self.name = name
self.times = np.array(self.times)
self.table_type = tables[table_type]
def __add__(self, waveform):
"""
Handle a waveform being added to the MDC set.
Parameters
----------
waveform : Waveform object
The waveform which should be added to the MDC set.
"""
# Check that this type of waveform can go into this type of
# XML file.
if not table_types[self.inj_families_abb[waveform.waveform]] == self.table_type:
raise TableTypeError()
self.waveforms.append(waveform)
self.times = np.append(self.times, waveform.time)
def save_xml(self, filename):
"""
Save the MDC set as an XML SimBurstTable.
Parameters
----------
filename : str
The location to save the xml file. The output is gzipped, so ending it with
a ".gz" would stick with convention.
"""
xmldoc = ligolw.Document()
lw = xmldoc.appendChild(ligolw.LIGO_LW())
sim = lsctables.New(self.table_type)
lw.appendChild(sim)
# This needs to be given the proper metadata once the package has the maturity to
# write something sensible.
for waveform in self.waveforms:
procrow = process.register_to_xmldoc(xmldoc, "minke_burst_mdc+{}".format(minke.__version__), {}) # waveform.params)
try:
waveform_row = waveform._row(sim)
waveform_row.process_id = procrow.process_id
except:
row = sim.RowType()
for a in list(self.table_type.validcolumns.keys()):
if a in list(waveform.params.keys()):
setattr(row, a, waveform.params[a])
else:
if not hasattr(waveform, a):
setattr(row, a, 0)
else:
setattr(row, a, getattr(waveform, a))
row.waveform = waveform.waveform
if self.table_type == lsctables.SimBurstTable:
# Fill in the time
                row.set_time_geocent(LIGOTimeGPS(float(waveform.time)))
# Get the sky locations
row.ra, row.dec, row.psi = waveform.ra, waveform.dec, waveform.psi
row.simulation_id = waveform.simulation_id
row.waveform_number = random.randint(0,int(2**32)-1)
### !! This needs to be updated.
row.process_id = "process:process_id:0" #procrow.process_id
waveform_row = row
sim.append(waveform_row)
#del waveform_row
# Write out the xml and gzip it.
utils.write_filename(xmldoc, filename, gz=True)
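    # Illustrative (hypothetical) use of this class; the SineGaussian arguments
    # below are assumed for the sketch and may not match the minke.sources API:
    # mdcset = MDCSet(["H1", "L1"], name="example set")
    # mdcset + sources.SineGaussian(q=9, frequency=235.0, hrss=1e-22, time=1126259462)
    # mdcset.save_xml("sg_example.xml.gz")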
def load_xml(self, filename, full=True, start=None, stop=None):
"""Load the MDC Set from an XML file containing the SimBurstTable.
Parameters
----------
filename : str
The filename of the XML file.
full : bool
If this is true (which is the default) then all of
the calculated parameters are computed from the waveform
definintion.
start : float
The time at which the xml read-in should
start. The default is "None", in which case the xml file
will be read-in from the start.
end : float
The last time to be read from the xml file. The default is None,
which causes the xml to be read right-up to the last time in the
file.
To Do
-----
        At the moment this loads the information into the object, but it
doesn't produce waveform objects for each of the injections in the
file. This should be fixed so that the object works symmetrically.
"""
i = 0
#sim_burst_table = lalburst.SimBurstTableFromLIGOLw(filename, start, stop)
xml = glue.ligolw.utils.load_filename(filename,
contenthandler = glue.ligolw.ligolw.LIGOLWContentHandler,
verbose = True)
sim_burst_table = glue.ligolw.table.get_table(xml, self.table_type.tableName)
for i,simrow in enumerate(sim_burst_table):
            # This is an ugly kludge to get around the poor choice of waveform name in the xmls.
if simrow.waveform[:3]=="s15":
self.numrel_file = str(sim_burst_table.waveform)
sim_burst_table.waveform = "Dimmelmeier+08"
self.waveforms.append(source_from_row(simrow))
if full:
self._measure_hrss(i)
self._measure_egw_rsq(i)
if self.table_type == tables["burst"]:
self.times = np.append(self.times, float(simrow.time_geocent_gps))
def _generate_burst(self,row,rate=16384.0):
"""
Generate the burst described in a given row, so that it can be
measured.
Parameters
----------
row : SimBurst Row
The row of the waveform to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
row = self.waveforms[row]
hp, hx, hp0, hx0 = row._generate()
return hp, hx, hp0, hx0
def _getDetector(self, det):
"""
A method to return a LALDetector object corresponding to a detector's
X#-style name, e.g. 'H1' as the Hanford 4km detector.
Parameters
----------
det : str
A string describing the detector in the format letter-number, e.g
"H1" would be the Hanford 4km detector, "L1" would be the
Livingston 4km, and so-forth.
Returns
-------
detector : LALDetector
The LAL object describing the detector
"""
# get detector
return lalsimulation.DetectorPrefixToLALDetector(det)
#if det not in lal.cached_detector_by_prefix.keys():
# raise ValueError, "%s is not a cached detector. "\
# "Cached detectors are: %s" % (det, inject.cached_detector.keys())
#return lal.cached_detector_by_prefix[det]
def _timeDelayFromGeocenter(self, detector, ra, dec, gpstime):
"""
Calculate the time delay between the geocentre and a given detector
for a signal from some sky location.
Parameters
----------
detector : str
A string describing the detector, e.g. H1 is the Hanford 4km
detector.
ra : float
The right-ascension of the observation in radians
dec : float
            The declination of the observation in radians
        gpstime : float
            The GPS time of the observation at the geocentre
"""
if isinstance(detector, str): detector = self._getDetector(detector)
gpstime = LIGOTimeGPS(float(gpstime))
return XLALTimeDelayFromEarthCenter(detector.location, ra, dec, gpstime)
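    # A minimal sketch of a call to the helper above; the GPS time and the sky
    # position are made-up values:
    # delay = mdcset._timeDelayFromGeocenter("H1", ra=1.2, dec=-0.3, gpstime=1126259462)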
def directory_path(self):
"""
Generate the directory where the frames from this MDC should be stored,
so, e.g. Gaussians 0d100 would go in "ga/ga0d100/"
Returns
-------
str
the folder structure
"""
name = self._simID(0)
abb = self.inj_families_abb[self.waveforms[0].waveform].lower()
return "{}/{}".format(abb, name)
def _simID(self, row):
"""
Generate a name for an injection set in the format expected by cWB
Parameters
----------
row : SimBurst
The simburst table row describing the injection
Returns
-------
str
The name of the injection in the cWB format
"""
row = self.waveforms[row]
name = ''
numberspart = ''
if row.waveform in ("Dimmelmeier+08", "Scheidegger+10", "Mueller+12", "Ott+13", "Yakunin+10"):
#print row
numberspart = os.path.basename(row.params['numrel_data']).split('.')[0]
if row.waveform == "Gaussian":
numberspart = "{:.3f}".format(row.duration * 1e3)
elif row.waveform == "SineGaussian":
if row.pol_ellipse_e==1.0:
pol="linear"
elif row.pol_ellipse_e==0.0:
pol="circular"
elif 0.0<row.pol_ellipse_e<1.0:
pol = "elliptical"
else:
pol = "inclined"
numberspart = "f{:.0f}_q{:.0f}_{}".format(row.frequency, row.q, pol)
elif row.waveform == "BTLWNB":
numberspart = "{}b{}tau{}".format(row.frequency, row.bandwidth, row.duration)
name += '{}_{}'.format(self.inj_families_abb[row.waveform].lower(), numberspart).replace('.','d')
return name
def _measure_hrss(self, row, rate=16384.0):
"""
Measure the various components of hrss (h+^2, hx^2, hphx) for a given
input row. This is accomplished by generating the burst and calling
the SWIG wrapped XLALMeasureHrss in lalsimulation.
Parameters
----------
row : int
The row number of the waveforms to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
hrss : float
The measured hrss of the waveform amplitude: sqrt(|Hp|^2 + |Hx|^2)
hphp : float
The hrss of the + polarisation only.
hxhx : float
The hrss of the x polarisation only.
hphx : float
The hrss of |HpHx|
"""
row = self.waveforms[row]
hp, hx, hp0, hx0 = row._generate() #self._generate_burst(row)# self.hp, self.hx, self.hp0, self.hx0
hp0.data.data *= 0
hx0.data.data *= 0
# H+ hrss only
hphp = lalsimulation.MeasureHrss(hp, hx0)**2
# Hx hrss only
hxhx = lalsimulation.MeasureHrss(hp0, hx)**2
# sqrt(|Hp|^2 + |Hx|^2)
hrss = lalsimulation.MeasureHrss(hp, hx)
hp.data.data = numpy.abs(hx.data.data) + numpy.abs(hp.data.data)
# |H+Hx|
hphx = (lalsimulation.MeasureHrss(hp, hx0)**2 - hrss**2)/2
#print hrss
self.strains.append([hrss, hphp, hxhx, hphx])
def _measure_egw_rsq(self, row, rate=16384.0):
"""
Measure the energy emitted in gravitational waves divided
by the distance squared in M_solar / pc^2. This is accomplished
by generating the burst and calling the SWIG wrapped
XLALMeasureHrss in lalsimulation.
Parameters
----------
row : int
The row number of the waveforms to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
egw : float
The energy emitted in gravitational waves divided
by the distance squared in M_solar / pc^2.
"""
hp, hx, _, _ = self._generate_burst(row)
self.egw.append(lalsimulation.MeasureEoverRsquared(hp, hx))
def _responses(self, row):
"""
        Calculate the antenna responses for each detector to the waveform.
Parameters
----------
row : int
The row number of the waveforms to be measured
Returns
-------
responses : list of lists
A list containing the lists of antenna responses, with the first
element of each list containing the detector acronym.
"""
output = []
row = self.waveforms[row]
for detector in self.detectors:
time = row.time_geocent_gps + self._timeDelayFromGeocenter(detector, row.ra, row.dec, row.time_geocent_gps)
time = np.float64(time)
rs = response(time, row.ra, row.dec, 0, row.psi, 'radians', detector)
output.append([detector, time, rs[0], rs[1]] )
return output
def plot_skymap(self):
"""
Plot a skymap of the injections distribution in RA and DEC on a Hammer projection.
Returns
-------
matplotlib figure
"""
fig = plt.figure()
# Load the ra and dec numbers out of the waveforms
dec = [getattr(s, 'dec') for s in self.waveforms]
ra = [getattr(s, 'ra') for s in self.waveforms]
# Make the plot on a hammer projection
plt.subplot(111, projection='hammer')
H, x, y = np.histogram2d(ra, dec, [50, 25], range=[[0, 2*np.pi], [-np.pi/2, np.pi/2]])
dist = plt.pcolormesh(x-np.pi,y, H.T, cmap="viridis")
plt.title("Sky distribution")
plt.colorbar(dist, orientation='horizontal')
return fig
def plot_hist(self, parameter):
"""
Plot a histogram of a waveform parameter.
Parameters
----------
parameter : str
The name of the simburst table parameter which is desired for the plot.
Returns
-------
matplotlib figure
"""
fig = plt.figure()
prms = [getattr(s, parameter) for s in self.waveforms]
ax2 = plt.subplot(111)
ax2.set_title("{} distribution".format(parameter))
ax2.set_xlabel(parameter)
ax2.hist(prms, bins=100, log=True, histtype="stepfilled", alpha=0.6);
return fig
def gravEn_row(self, row, frame):
"""
Produces a gravEn-style log row for a row of the simBurstTable.
Parameters
----------
row : int
The row number of the waveforms to be measured
Returns
-------
str
A string in the gravEn format which describes the injection.
"""
strains = self.strains[row]
rowname = self._simID(row)
responses = self._responses(row)
energy = self.egw[row]
row = self.waveforms[row]
output = []
if not row.incl:
cosincl = ""
else:
cosincl = np.cos(row.incl)
output.append(self.name) # GravEn_SimID
output.append(strains[0]) # SimHrss
output.append(energy) # SimEgwR2
output.append(strains[0]) # GravEn_Ampl
output.append(cosincl) # Internal_x the cosine of the angle the LOS makes with axis of angular momentum
        output.append(row.phi) # Internal_phi angle between source x-axis and the LOS
output.append(np.cos(np.pi/2.0 - row.dec)) # cos(External_x) # this needs to be the co-declination
output.append(row.ra if row.ra < np.pi else row.ra - 2*np.pi)
# ^ External_phi # This is the RA projected onto an Earth-based coordinate system
output.append(row.psi) # External_psi # source's polarisation angle
output.append(frame.start) # FrameGPS
output.append(row.time_geocent_gps) # EarthCtrGPS
output.append(rowname) # SimName
output.append(strains[1]) # SimHpHp
output.append(strains[2]) # SimHcHc
        output.append(strains[3]) # SimHpHc
output.append(" ".join(" ".join(map(str,l)) for l in responses))
return ' '.join(str(e) for e in output)
class Frame():
"""
Represents a frame, in order to prepare the injection frames
"""
def __init__(self, start, duration, ifo, number = -1):
"""
Parameters
----------
number : int
The frame's number within the project. Defaults to -1.
"""
self.start = start
self.duration = duration
self.end = self.start + duration
self.ifos = ifo
        self.number = number
def __repr__(self):
out = ''
out += "MDC Frame \n"
for ifo in self.ifos:
out += "{} {} {} \n".format(ifo, self.start, self.duration)
return out
def get_rowlist(self,mdcs):
"""
Return the rows from an MDC set which correspond to this frame.
Parameters
----------
mdcs : MDCSet object
The set of MDCs from which the rows are to be found.
"""
return np.where((mdcs.times<self.end)&(mdcs.times>self.start))[0]
def calculate_n_injections(self, mdcs):
return len(mdcs.times[(mdcs.times<self.end)&(mdcs.times>self.start)])
def generate_log(self,mdc):
log = '# GravEn_SimID SimHrss SimEgwR2 GravEn_Ampl Internal_x Internal_phi External_x External_phi External_psi FrameGPS EarthCtrGPS SimName SimHpHp SimHcHc SimHpHc H1 H1ctrGPS H1fPlus H1fCross L1 L1ctrGPS L1fPlus L1fCross\n'
rowlist = self.get_rowlist(mdc)
for row in rowlist:
log += mdc.gravEn_row(row, self)
log += "\n"
return log
def generate_gwf(self, mdc, directory, project = "Minke", channel="SCIENCE", force=False, rate=16384.0):
"""
Produce the gwf file which corresponds to the MDC set over the period of this frame.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
project : str
The name of the project which this frame is a part of. Defaults to 'Minke'.
channel : str
The name of the channel which the injections should be made into. This is prepended by the initials
for each interferometer, so there will be a channel for each interferometer in the gwf.
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
gwf
The GWF file for this frame.
"""
ifosstr = "".join(set(ifo[0] for ifo in self.ifos))
family = mdc.waveforms[0].waveform
epoch = lal.LIGOTimeGPS(self.start)
filename = "{}-{}-{}-{}.gwf".format(ifosstr, family, self.start, self.duration)
self.frame = lalframe.FrameNew(epoch = epoch,
duration = self.duration, project='', run=1, frnum=1,
detectorFlags=lal.LALDETECTORTYPE_ABSENT)
ifobits = np.array([getattr(lal,"{}_DETECTOR_BIT".format(lal.cached_detector_by_prefix[ifo].frDetector.name.upper()))
for ifo in self.ifos])
ifoflag = numpy.bitwise_or.reduce(ifobits)
RUN_NUM = -1 # Simulated data should have a negative run number
head_date = str(self.start)[:5]
frameloc = directory+"/"+mdc.directory_path()+"/"+head_date+"/"
mkdir(frameloc)
if not os.path.isfile(frameloc + filename) or force:
epoch = lal.LIGOTimeGPS(self.start)
frame = lalframe.FrameNew(epoch, self.duration, project, RUN_NUM, self.number, ifoflag)
data = []
# Loop through each interferometer
for ifo in self.ifos:
# Calculate the number of samples in the timeseries
nsamp = int((self.end-self.start)*rate)
# Make the timeseries
h_resp = lal.CreateREAL8TimeSeries("{}:{}".format(ifo, channel), epoch, 0, 1.0/rate, lal.StrainUnit, nsamp)
# Loop over all of the injections corresponding to this frame
rowlist = self.get_rowlist(mdc)
if len(rowlist)==0: return
for row in rowlist:
sim_burst = mdc.waveforms[row]._row()
if sim_burst.hrss > 1:
distance = sim_burst.amplitude
else:
distance = None
#hp, hx = lalburst.GenerateSimBurst(sim_burst, 1.0/rate);
hp, hx, _, _ = mdc.waveforms[row]._generate(rate=rate, half=True, distance=distance)
# Apply detector response
det = lalsimulation.DetectorPrefixToLALDetector(ifo)
# Produce the total strains
h_tot = lalsimulation.SimDetectorStrainREAL8TimeSeries(hp, hx,
sim_burst.ra, sim_burst.dec, sim_burst.psi, det)
# Inject the waveform into the overall timeseries
lalsimulation.SimAddInjectionREAL8TimeSeries(h_resp, h_tot, None)
lalframe.FrameAddREAL8TimeSeriesSimData(frame, h_resp)
# Make the directory in which to store the files
# if it doesn't exist already
mkdir(frameloc)
# Write out the frame file
lalframe.FrameWrite(frame, frameloc+filename)
class HWInj(Frame):
"""
Represents a hardware injection frame.
Injection frames must be an ASCII file of the hoft sampled at
the antenna sampling rate, appropriately convolved with an
antenna response function.
As a result of the simplicity of this specific output format
we do not need information such as start-time in the file itself,
however we should have a sensible naming scheme for the ASCII files
since they will need to be produced as sidecars for an xml file.
"""
def __init__(self, ifos):
"""We'll need to know the start-time, the duration, and the ifo
for each which is to be used for hardware injections in order
to keep consistency with the data in the xml file, and so that the
appropriate waveform is injected into the appropriate detector.
Parameters
----------
ifos : list
The name of the interferometers, e.g. "L1" for the Livingston, LA LIGO detector.
"""
self.ifos = ifos
def __repr__(self):
"""
The printable representation of this object.
"""
out = ""
out += "Hardware MDC Frame \n"
for ifo in self.ifos:
out += "{} \n".format(ifo)
return out
def generate_pcal(self, mdc, directory, force = False, rate=16384):
"""
Produce the PCAL-ready hardware injection files as an ASCII list
sampled at the detector's sample rate.
Parameters
----------
mdc : MDCSet object
The signal set which should be used to generate the frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
force : bool
If true this forces the regeneration of the file, even if it
already exists.
Outputs
-------
ascii file
The ASCII file containing the correctly sampled waveform convolved with
the antenna pattern.
"""
family = mdc.waveforms[0].waveform
frameloc = os.path.join(directory, (mdc.directory_path()))
#rowlist = self.get_rowlist(mdc)
# Unlike with a conventional frame, we need to produce a separate file
# for each IFO.
for ifo in self.ifos:
for sim_burst in mdc.waveforms:
#sim_burst = mdc.waveforms[row]
# Check if the file exists, or if we're forcing the creation
filename = "{}_{}_{}.txt".format(family,
sim_burst.time,
ifo)
if not os.path.isfile(frameloc + filename) or force:
data = []
epoch = lal.LIGOTimeGPS(sim_burst.time)
duration = 10
nsamp = duration*rate
h_tot = sim_burst._generate_for_detector([ifo], sample_rate=rate)
data = np.array(h_tot.data.data)
                    np.savetxt(frameloc + filename, data)
class HWFrameSet():
def __init__(self, ifos=["H1", "L1"]):
"""
A collection of hardware injection frames.
Parameters
----------
frame_list : str
The filespath of a CSV file containing the list of frames,
and the parameters required to produce them: the start and
duration times, and the interferometers they describe.
"""
self.frames = []
self.frames = [HWInj(ifos)]
#self.frames.append(frame)
def full_frameset(self, mdc, directory, force=False):
"""
Produce the gwf files which corresponds to the MDC set over the period of the frames in this collection.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
ascii files
The ASCII files for these hardware injections.
"""
for frame in self.frames:
frame.generate_pcal(mdc, directory, force)
class FrameSet():
def __init__(self, frame_list):
"""
A collection of frames.
Parameters
----------
frame_list : str
The filespath of a CSV file containing the list of frames,
and the parameters required to produce them: the start and
duration times, and the interferometers they describe.
"""
self.frames = []
self.frame_list = frame_list = pd.read_csv(frame_list)
for frame in frame_list.iterrows():
frame = frame[1]
ifos = frame['ifo'].replace("['",'').replace("']",'').replace("'",'').split(' ')
frame = Frame(frame['start time'],frame['duration'],ifos)
self.frames.append(frame)
def full_frameset(self, mdc, directory, channel="SCIENCE", force=False):
"""
Produce the gwf files which corresponds to the MDC set over the period of the frames in this collection.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
channel : str
The name of the channel which the injections should be made into. This is prepended by the initials
for each interferometer, so there will be a channel for each interferometer in the gwf.
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
gwf files
The GWF files for these frames.
"""
for frame in self.frames:
frame.generate_gwf(mdc, directory, channel, force)
def full_logfile(self, mdc, location):
"""
Produce a log file for the entire frame set
"""
full_log = ''
for frame in self.frames:
full_log += frame.generate_log(mdc)
with open(location, "w") as text_file:
text_file.write(full_log)
| isc |
njwilson23/rasterio | rasterio/tool.py | 1 | 5429 | """
Implementations of various common operations, like `show()` for displaying an
array or with matplotlib, and `stats()` for computing min/max/avg. Most can
handle a numpy array or `rasterio.Band()`. Primarily supports `$ rio insp`.
"""
from __future__ import absolute_import
import code
import collections
import logging
import warnings
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
except RuntimeError as e:
# Certain environment configurations can trigger a RuntimeError like:
# Trying to import matplotlibRuntimeError: Python is not installed as a
# framework. The Mac OS X backend will not be able to function correctly
# if Python is not installed as a framework. See the Python ...
warnings.warn(str(e), RuntimeWarning, stacklevel=2)
plt = None
import numpy
import rasterio
from rasterio.five import zip_longest
logger = logging.getLogger('rasterio')
Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
# Collect dictionary of functions for use in the interpreter in main()
funcs = locals()
def show(source, cmap='gray', with_bounds=True):
"""
Display a raster or raster band using matplotlib.
Parameters
----------
source : array-like or (raster dataset, bidx)
If array-like, should be of format compatible with
matplotlib.pyplot.imshow. If the tuple (raster dataset, bidx),
selects band `bidx` from raster.
cmap : str (opt)
Specifies the colormap to use in plotting. See
matplotlib.Colors.Colormap. Default is 'gray'.
with_bounds : bool (opt)
Whether to change the image extent to the spatial bounds of the image,
rather than pixel coordinates. Only works when source is
(raster dataset, bidx).
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
xs = source[0].res[0] / 2.
ys = source[0].res[1] / 2.
if with_bounds:
extent = (source[0].bounds.left - xs, source[0].bounds.right - xs,
source[0].bounds.bottom - ys, source[0].bounds.top - ys)
else:
extent = None
else:
arr = source
extent = None
if plt is not None:
imax = plt.imshow(arr, cmap=cmap, extent=extent)
fig = plt.gcf()
fig.show()
else:
raise ImportError("matplotlib could not be imported")
def stats(source):
"""Return a tuple with raster min, max, and mean.
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
else:
arr = source
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
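# Example (a sketch; 'example.tif' is a placeholder path):
# with rasterio.open('example.tif') as src:
#     print(stats((src, 1)))   # -> Stats(min=..., max=..., mean=...)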
def show_hist(source, bins=10, masked=True, title='Histogram'):
"""
Easily display a histogram with matplotlib.
Parameters
----------
bins : int, optional
Compute histogram across N bins.
data : np.array or rasterio.Band or tuple(dataset, bidx)
Input data to display. The first three arrays in multi-dimensional
arrays are plotted as red, green, and blue.
masked : bool, optional
When working with a `rasterio.Band()` object, specifies if the data
should be masked on read.
title : str, optional
Title for the figure.
"""
if plt is None:
raise ImportError("Could not import matplotlib")
if isinstance(source, (tuple, rasterio.Band)):
arr = source[0].read(source[1], masked=masked)
else:
arr = source
# The histogram is computed individually for each 'band' in the array
# so we need the overall min/max to constrain the plot
rng = arr.min(), arr.max()
    if len(arr.shape) == 2:
arr = [arr]
colors = ['gold']
else:
colors = ('red', 'green', 'blue', 'violet', 'gold', 'saddlebrown')
# If a rasterio.Band() is given make sure the proper index is displayed
# in the legend.
if isinstance(source, (tuple, rasterio.Band)):
labels = [str(source[1])]
else:
labels = (str(i + 1) for i in range(len(arr)))
# This loop should add a single plot each band in the input array,
# regardless of if the number of bands exceeds the number of colors.
# The colors slicing ensures that the number of iterations always
# matches the number of bands.
# The goal is to provide a curated set of colors for working with
# smaller datasets and let matplotlib define additional colors when
# working with larger datasets.
for bnd, color, label in zip_longest(arr, colors[:len(arr)], labels):
plt.hist(
bnd.flatten(),
bins=bins,
alpha=0.5,
color=color,
label=label,
range=rng
)
plt.legend(loc="upper right")
plt.title(title, fontweight='bold')
plt.grid(True)
plt.xlabel('DN')
plt.ylabel('Frequency')
fig = plt.gcf()
fig.show()
def main(banner, dataset, alt_interpreter=None):
""" Main entry point for use with python interpreter """
local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)
if not alt_interpreter:
code.interact(banner, local=local)
elif alt_interpreter == 'ipython':
import IPython
IPython.InteractiveShell.banner1 = banner
IPython.start_ipython(argv=[], user_ns=local)
else:
raise ValueError("Unsupported interpreter '%s'" % alt_interpreter)
return 0
| bsd-3-clause |
Harhoy/transport | transport.py | 1 | 9259 | from __future__ import division
import numpy as np
import math as m
from easygui import multenterbox
import pandas as pd
import matplotlib.pyplot as plt
import math as m
def import_xl(file_path):
df = pd.read_excel(file_path,header = None)
df = df.values
return df
def export_xl(file_path,sheets):
writer = pd.ExcelWriter(file_path)
for sheet,name in sheets.items():
df = pd.DataFrame(name)
df.to_excel(writer,sheet)
writer.save()
#Extracts a column from a matrix
def column(matrix, i):
return [row[i] for row in matrix]
#Extracts a row from a matrix
def row(matrix, i):
return [column[i] for column in matrix]
#Selection sort, O(n^2)
def selection_sort(array):
n = len(array)
for i in range(0,n):
smallest = i
for j in range(i,n):
if array[j]<array[smallest]:
smallest = j
copy = array[i]
array[i] = array[smallest]
array[smallest] = copy
return array
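#Example (illustrative): selection_sort([3, 1, 2]) returns [1, 2, 3]; note that
#the input list is sorted in place as well as returned.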
#Checks whether two lists contain at least one common number
def common_node(array_1,array_2):
x = selection_sort(array_1)
y = selection_sort(array_2)
i = 0
j = 0
share = 0
stop = max([len(x),len(y)])-1
while min([i,j])< stop:
if x[i]>y[j]:
j+=1
elif x[i]<y[j]:
i+=1
else:
share = 1
j = 10**6
i = 10**6
return share
def common_node_count(array_1,array_2):
x = selection_sort(array_1)
y = selection_sort(array_2)
i = 0
j = 0
share = 0
while i < len(x) and j < len(y):
if x[i]>y[j]:
j+=1
elif x[i]<y[j]:
i+=1
else:
share += 1
j +=1
i +=1
return share
#SHORTEST PATH FUNCTIONS
#Builds a graph (cost matrix) from a link list
def make_graph(array):
#nodes = common_node_count(column(array,0),column(array,1))
nodes = 35
    matrix = np.full((nodes,nodes),10**6) #Creates a matrix of large values to be replaced later
    for i in range(0,len(array)): #Main loop
        #Subtracts one for compatibility with python arrays
matrix[array[i][1]-1][array[i][0]-1] = array[i][2]
matrix[array[i][0]-1][array[i][1]-1] = array[i][2]
np.fill_diagonal(matrix, 0)
return matrix
#Builds an n x n shortest-path length matrix
def floyd_warshall(array):
matrix = make_graph(array)
#nodes = common_node_count(column(array,0),column(array,1))
nodes = 35
pred = np.full((nodes,nodes),-1)
for i in range(0,nodes):
for j in range(0,nodes):
if i != j:
pred[i][j] = i
for k in range(0,nodes):
for i in range(0,nodes):
for j in range(0,nodes):
if matrix[i][j] > matrix[i][k] + matrix[k][j]:
matrix[i][j] = matrix[i][k] + matrix[k][j]
pred[i][j] = pred[k][j]
return matrix,pred
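#Illustrative sketch of the shortest-path helpers above. Links are given as
#(from-node, to-node, length) rows with 1-based node numbers, as in get_network():
# links = np.array([[1, 2, 4], [2, 3, 2], [1, 3, 9]])
# lengths, pred = floyd_warshall(links)
# lengths[0][2] then holds the shortest 1->3 distance (6 in this sketch).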
#Loads data from a csv file into a network array
def get_network(net_csv):
graf = open(net_csv,'r')
lenker=0
for line in graf:
lenker+=1
graf_edit = np.full((lenker, 3),0)
graf = open(net_csv,'r')
k = 0
for line in graf:
stuff = line.split(";")
graf_edit[k][0] = float(stuff[0])
graf_edit[k][1] = float(stuff[1])
temp = stuff[2].split('\n')[0]
graf_edit[k][2] = float(temp)
k+=1
return graf_edit
#Builds a path vector
def path(p,i,j,path_vec):
if i == j:
path_vec.append(i)
else:
path(p, i, p[i][j],path_vec)
path_vec.append(j)
#Retrieves a specific path
def get_path(p,i,j):
#j = j + 1
path_vec=[]
path(p,i,j,path_vec)
#for i in range(1,len(path_vec)):
# path_vec[i] = path_vec[i] - 1
return path_vec
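#Example (hypothetical numbers): get_path(pred, 0, 4) returns the 0-based node
#indices visited on the shortest route from node 1 to node 5, e.g. [0, 2, 4].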
#Builds an adjacency matrix (unfinished)
def build_adj(pred):
adj_mat = np.zeros((len(pred),len(pred)))
array_a = []
array_b = []
for i in range(1,len(pred)):
for j in range(1,len(pred)):
array_a = get_path(pred,i,j)
print array_a
array_b = get_path(pred,2,10)
print array_b
try:
adj_mat[1][j] = common_node(array_a,array_b)
except:
adj_mat[1][j] = 0
print adj_mat[1][j]
return adj_mat
#Network loader
#Arguments: (1) predecessor matrix (2) number of nodes (3) network file (4) od matrix
def network_loader(graf,net,od,pred):
    #Number of nodes
    n = len(od)-1
    #Editing: reset trips and refresh distances
    for k in range(0,len(net)):
        net[k][3]=0 #Resets the number of trips
        net[k][2]=graf[k][2] #Inserts updated distances from the graph
    #Assigns trips onto the network
for i in range(0,n):
for j in range(0,n):
path = get_path(pred,i,j)
len_path=get_len_path(path)
for h in range(0,len_path):
for k in range(0,len(net)):
if net[k][0] == path[h]+1 and net[k][1] == path[1+h]+1:
net[k][3] += int(od[i][j])
elif net[k][1] == path[h]+1 and net[k][0] == path[1+h]+1:
net[k][3] += int(od[i][j])
return net
#a=get_path(pred,5,12)
#GRAVITY MODEL FUNCTIONS
def deter_mat_make(length_mat):
deter_mat = np.zeros((len(length_mat),len(length_mat)))
for i in range(0,len(length_mat)):
for j in range(0,len(length_mat)):
deter_mat[i][j] = deter(length_mat[i][j])
return deter_mat
def deter(length):
return 2.71**(beta*length)
def sumproduct(list1,list2):
sums = 0
for i in range(0,len(list1)):
sums += list1[i]*list2[i]
return sums
def gravity(origin, destination, length_mat):
#Initialization
    deter_mat = deter_mat_make(length_mat) #Builds the deterrence matrix
    dimension = len(origin) #Gets the matrix dimension
    alpha = [1]*(dimension) #Initializes the alpha vector
    beta = [1]*(dimension) #Initializes the beta vector
    largest = 10**6 #Initializes the largest deviation
    alpha_last = alpha #Initializes the previous alpha (iteration -1)
    beta_last = beta #Initializes the previous beta (iteration -1)
    k = 0 #Initializes the iteration counter
iterasjoner = []
    #Main loop
while largest > .00001:
        #Updates the factors
for p in range(0,dimension):
alpha[p] = origin[p]/(sumproduct(beta_last,column(deter_mat,p)))
beta[p] = destination[p]/(sumproduct(alpha,row(deter_mat,p)))
largest = 0
        #Loops to find the largest element
for j in range(0,dimension):
current = alpha[j]*sumproduct(beta,column(deter_mat,j))-origin[j]
if current>largest:
largest = current
        #Sets the previous beta
beta_last = beta
iterasjoner.append(largest)
        #Counts one more iteration
k+=1
print "Konvergens, Gravitasjonsmodell", largest
if k == maxiter:
largest = 0
return alpha,beta,k,iterasjoner
def create_od(origin,destination, length_mat):
alpha,beta,k,iterasjoner = gravity(origin, destination, length_mat)
deter_mat = deter_mat_make(length_mat)
od = np.zeros((len(origin),len(origin)))
for i in range(0,len(origin)):
for j in range(0,len(origin)):
od[i][j] = alpha[i]*beta[j]*deter_mat[i][j]
return od,alpha,beta,k,iterasjoner
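#Illustrative sketch of the gravity-model helpers above. Note that `beta` (the
#deterrence exponent used by deter()) and `maxiter` are module-level globals that
#must be defined before calling; the numbers below are made up:
# beta, maxiter = -0.1, 100
# origin = [100, 200, 150]
# destination = [150, 100, 200]
# length_mat = floyd_warshall(links)[0]      # any zone-to-zone cost matrix works
# od, alpha_vec, beta_vec, n_iter, gaps = create_od(origin, destination, length_mat)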
def calc_pt_matrix(od,length_mat):
out_od = np.zeros((len(od),len(od)))
for i in range(0,len(od)):
for j in range(0,len(od)):
            out_od[i][j] = int(od[i][j])*length_mat[i][j]
return out_od
def get_min(net):
smallest = 10**6
smallest_id = 10**6
for i in range(0,len(net)):
if net[i][3]/net[i][2]<smallest and net[i][5]==0:
smallest = net[i][3]/net[i][2]
smallest_id = i
return smallest_id,smallest
def change_graph(graph,net):
graph_out = graph
for i in range(0,len(net)):
if net[i][5]==1:
graph_out[i][2]=k_just*graph_out[i][2]
return graph_out
def production(net):
sumcost = 0
for i in range(0,len(net)):
if net[i][5]!=1:
sumcost += (net[i][3]/capacity)*net[i][2]
return sumcost
def sum_pass(net):
sumpass = 0
for i in range(0,len(net)):
sumpass+=net[i][3]
return sumpass
def get_len_path(path):
len_path = 0
if len(path) < 3:
len_path = 0
elif len(path) == 3:
len_path = 2
else:
len_path=int(len(path)/2)+int(len(path)%2)+1
return len_path
def obj(od,length_mat,net,prodgoal):
return (production(net)*kmk*dogn-prodgoal)**2*(k_just-1)*capacity/.9+time_cost(od,length_mat)
def time_cost(od,length_mat):
cost = 0
for i in range(0,len(od)-1):
for j in range(0,len(od)-1):
cost += od[i][j]*length_mat[i][j]
return cost
def get_zero_net(net):
zero_net = np.zeros((len(net),6))
for i in range(0,len(net)):
zero_net[i][2] = net[i][2]
zero_net[i][3] = net[i][3]
zero_net[i][5] = net[i][5]
return zero_net
def update_zero_net(net,zero_net):
for i in range(0,len(net)):
zero_net[i][5] = net[i][5]
return zero_net
| mit |
danforthcenter/plantcv | plantcv/plantcv/visualize/obj_size_ecdf.py | 1 | 1554 | # Plot Empirical Cumulative Distribution Function for Object Size
import os
import cv2
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
from statsmodels.distributions.empirical_distribution import ECDF
from plotnine import ggplot, aes, geom_point, labels, scale_x_log10
def obj_size_ecdf(mask, title=None):
"""
Plot empirical cumulative distribution for object size based on binary mask.
Inputs:
mask = binary mask
title = a custom title for the plot (default=None)
Returns:
fig_ecdf = empirical cumulative distribution function plot
:param mask: numpy.ndarray
:param title: str
:return fig_ecdf: plotnine.ggplot.ggplot
"""
objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
areas = [cv2.contourArea(cnt) for cnt in objects]
# Remove objects with areas < 1px
areas = [i for i in areas if i >= 1.0]
ecdf = ECDF(areas, side='right')
ecdf_df = pd.DataFrame({'object area': ecdf.x[1:], 'cumulative probability': ecdf.y[1:]})
# create ecdf plot and apply log-scale for x-axis (areas)
fig_ecdf = (ggplot(data=ecdf_df, mapping=aes(x='object area', y='cumulative probability'))
+ geom_point(size=.1)
+ scale_x_log10())
if title is not None:
fig_ecdf = fig_ecdf + labels.ggtitle(title)
# Plot or print the ecdf
_debug(visual=fig_ecdf,
filename=os.path.join(params.debug_outdir, str(params.device) + '_area_ecdf.png'))
return fig_ecdf
| mit |
lalitkumarj/NEXT-psych | next/apps/TupleBanditsPureExploration/Dashboard.py | 1 | 3313 | """
TupleBanditsPureExplorationDashboard
author: Nick Glattard, n.glattard@gmail.com
last updated: 4/24/2015
######################################
TupleBanditsPureExplorationDashboard
"""
import json
import numpy
import numpy.random
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from next.utils import utils
from next.apps.AppDashboard import AppDashboard
class TupleBanditsPureExplorationDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def get_app_supported_stats(self):
"""
Returns a list of dictionaries describing the identifier (stat_id) and
necessary params inputs to be used when calling getStats
Expected output (list of dicts, each with fields):
(string) stat_id : the identiifer of the statistic
(string) description : docstring of describing outputs
(list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task'
"""
stat_list = self.get_supported_stats()
stat = {}
stat['stat_id'] = 'most_current_ranking'
stat['description'] = self.most_current_ranking.__doc__
stat['necessary_params'] = ['alg_label']
stat_list.append(stat)
return stat_list
def most_current_ranking(self,app_id,exp_uid,alg_label):
"""
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
        The 'headers' field contains a list of dictionaries, one per column of the table, with fields 'label' and 'field', where 'label' is the heading shown at the top of the column and 'field' is the name of the field in 'data' that the column corresponds to
Expected output (in dict):
plot_type : 'columnar_table'
headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
(list of dicts with fields) data (each dict is a row, each field is the column for that row):
(int) index : index of target
(int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
"""
alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list')
for algorithm in alg_list:
if algorithm['alg_label'] == alg_label:
alg_id = algorithm['alg_id']
alg_uid = algorithm['alg_uid']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid})
list_of_log_dict = sorted(list_of_log_dict, key=lambda k: k['num_reported_answers'] )
print didSucceed, message
item = list_of_log_dict[-1]
return_dict = {}
return_dict['headers'] = [{'label':'Rank','field':'rank'},{'label':'Target','field':'index'},{'label':'Score','field':'score'},{'label':'Precision','field':'precision'}]
return_dict['data'] = item['targets']
return_dict['plot_type'] = 'columnar_table'
return return_dict
| apache-2.0 |
jgowans/correlation_plotter | rfi_looker.py | 1 | 1835 | #!/usr/bin/env python
import os, time
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
results_directory = os.getenv('HOME') + "/rfi_capture_results/"
SAMPLE_FREQUENCY = 3600.0 # MHz and us
ADC_SCALE_VALUE = 707.94
# algorithm:
# open a .npy file (or do the disk buffer thing)
filename = raw_input("what file should be open? [most recent] ")
if filename == "": # default to the most recent file
filename = "/tmp/rfi_signal.npy"
else:
filename = results_directory + filename
signal = np.load(filename)
decimation_factor = int(len(signal)/2**20) + 1
print "decimation factor: " + str(decimation_factor)
if decimation_factor >= 2 :
signal_decimated = scipy.signal.decimate(signal, decimation_factor, n=1, ftype="fir")
else:
signal_decimated = signal
print "len : " + str(len(signal_decimated))
axis = np.linspace(0, decimation_factor * len(signal_decimated)/SAMPLE_FREQUENCY, len(signal_decimated), endpoint=False)
plt.plot(axis, signal_decimated, "b.")
plt.show()
# plot the signal decimated by a parameter (default: 1)
# ask the user to input a subplot time
start_time = float(raw_input("At what time (microseconds) does the signal start? "))
end_time = float(raw_input("At what time (microseconds) does the signal end? "))
start_sample = int( start_time * SAMPLE_FREQUENCY )
end_sample = int( end_time * SAMPLE_FREQUENCY )
subsignal = signal[start_sample:end_sample]
subsignal_axis = np.linspace(start_time, end_time, len(subsignal), endpoint=False)
spectrum = np.fft.rfft(subsignal)
spectrum_axis = np.linspace(0, SAMPLE_FREQUENCY/2, len(spectrum), endpoint=False)
plt.subplot(211)
plt.plot(subsignal_axis, subsignal)
plt.subplot(212)
plt.plot(spectrum_axis, 10*np.log10( np.abs(spectrum) / (ADC_SCALE_VALUE*len(spectrum) )))
plt.show()
# plot the subplot and the fft of the subplot
| mit |
dblN/misc | utils.py | 1 | 3046 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from keras.layers import Dense
from keras.preprocessing.image import apply_transform
import matplotlib.pyplot as plt
def take_glimpses(image, location, sizes):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location,
normalized=True, centered=True, uniform_noise=False)
glimpses += [tf.image.resize_images(glimpse, resize)]
return glimpses
def glimpse_network(image, location, sizes, activation="relu",
glimpse_num_features=128, location_num_features=128, output_dim=256):
assert len(sizes) == 3
with tf.variable_scope("glimpse_network"):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location, uniform_noise=False,
normalized=True, centered=True)
glimpses += [tf.image.resize_images(glimpse, resize[0], resize[1])]
glimpse = tf.concat(-1, glimpses)
glimpse = tf.reshape(glimpse, (-1, np.prod(resize) * len(sizes)))
glimpse_feature = Dense(glimpse_num_features, activation=activation)(glimpse)
location_feature = Dense(location_num_features, activation=activation)(location)
feature = Dense(output_dim, activation=activation)(glimpse_feature + location_feature)
return feature, glimpses
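# Hypothetical usage sketch for glimpse_network (assumes the same TF 0.x-era graph
# API used above; the placeholder shapes and glimpse sizes are illustrative only):
# image_ph = tf.placeholder(tf.float32, [None, 128, 128, 1])
# location_ph = tf.placeholder(tf.float32, [None, 2])
# feature, glimpses = glimpse_network(image_ph, location_ph,
#                                     sizes=[(8, 8), (16, 16), (32, 32)])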
def accuracy_score(y_preds, y_true):
return np.sum((y_preds == y_true).astype(np.float32)) / y_preds.shape[0]
def translate(batch_x, size=(128, 128)):
"""Make translated mnist"""
height = batch_x.shape[1]
width = batch_x.shape[2]
X = np.zeros((batch_x.shape[0],) + size + (1,), dtype=batch_x.dtype)
X[:, :height, :width, :] = batch_x
for i, x in enumerate(X[:]):
tx = np.random.uniform(-(size[1] - width), 0)
ty = np.random.uniform(-(size[0] - height), 0)
translation_matrix = np.asarray([
[1, 0, tx],
[0, 1, ty],
[0, 0, 1]
], dtype=batch_x.dtype)
X[i, :, :, :] = apply_transform(x, translation_matrix, channel_index=2, fill_mode="nearest", cval=0.)
return X
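# Minimal sketch of `translate` (assumption: a batch of 28x28 single-channel MNIST
# images; the output canvas defaults to 128x128):
# >>> batch = np.zeros((4, 28, 28, 1), dtype=np.float32)
# >>> translate(batch).shape
# (4, 128, 128, 1)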
def plot_glimpse(images, locations, name="glimpse.png"):
image = images[0]
location = locations[:, 0, :]
fig = plt.figure()
plt.imshow(image, cmap=plt.get_cmap("gray"))
plt.plot(location[:, 0], location[:, 1])
for i, (x, y) in enumerate(location):
plt.annotate("t=%d" % i, xy=(x, y), xytext=(-10, 10),
textcoords="offset points", ha="right", va="bottom",
bbox=dict(boxstyle="round, pad=0.5", fc="white", alpha=0.5),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=0"))
plt.savefig(name)
plt.gcf().clear()
plt.close("all")
| mit |
rseubert/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 39 | 4706 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# compute the F-scores of the features; the tests below check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
"""Check lasso stability path"""
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
"""Check randomized lasso"""
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
"""Check randomized sparse logistic regression"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
"""Check randomized sparse logistic regression on sparse data"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
pyxll/pyxll-examples | matplotlib/interactiveplot.py | 1 | 3441 | """
Example code showing how to draw an interactive matplotlib figure
in Excel.
While the figure is displayed, Excel is still usable in the background
and the chart may be updated with new data by calling the same
function again.
"""
from pyxll import xl_func
from pandas.stats.moments import ewma
# matplotlib imports
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
# Qt imports
from PySide import QtCore, QtGui
import timer # for polling the Qt application
# dict to keep track of any chart windows
_plot_windows = {}
@xl_func("string figname, numpy_column<float> xs, numpy_column<float> ys, int span: string")
def mpl_plot_ewma(figname, xs, ys, span):
"""
Show a matplotlib line plot of xs vs ys and ewma(ys, span) in an interactive window.
:param figname: name to use for this plot's window
:param xs: list of x values as a column
:param ys: list of y values as a column
:param span: ewma span
"""
# Get the Qt app.
# Note: no need to 'exec' this as it will be polled in the main windows loop.
app = get_qt_app()
# create the figure and axes for the plot
fig = Figure(figsize=(600, 600), dpi=72, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
ax = fig.add_subplot(111)
# calculate the moving average
ewma_ys = ewma(ys, span=span)
# plot the data
ax.plot(xs, ys, alpha=0.4, label="Raw")
ax.plot(xs, ewma_ys, label="EWMA")
ax.legend()
# generate the canvas to display the plot
canvas = FigureCanvas(fig)
# Get or create the Qt windows to show the chart in.
if figname in _plot_windows:
# get from the global dict and clear any previous widgets
window = _plot_windows[figname]
layout = window.layout()
if layout:
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
else:
# create a new window for this plot and store it for next time
window = QtGui.QWidget()
window.resize(800, 600)
window.setWindowTitle(figname)
_plot_windows[figname] = window
# create the navigation toolbar
toolbar = NavigationToolbar(canvas, window)
# add the canvas and toolbar to the window
layout = window.layout() or QtGui.QVBoxLayout()
layout.addWidget(canvas)
layout.addWidget(toolbar)
window.setLayout(layout)
window.show()
return "[Plotted '%s']" % figname
#
# Taken from the ui/qt.py example
#
def get_qt_app():
"""
returns the global QtGui.QApplication instance and starts
the event loop if necessary.
"""
app = QtCore.QCoreApplication.instance()
if app is None:
# create a new application
app = QtGui.QApplication([])
# use timer to process events periodically
processing_events = {}
def qt_timer_callback(timer_id, time):
if timer_id in processing_events:
return
processing_events[timer_id] = True
try:
app = QtCore.QCoreApplication.instance()
if app is not None:
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
finally:
del processing_events[timer_id]
timer.set_timer(100, qt_timer_callback)
return app
| unlicense |
hsuantien/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
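# Illustrative sketch (the threshold value is hypothetical): keep only features
# whose training-set variance exceeds 0.1.
# >>> selector = VarianceThreshold(threshold=0.1)
# >>> X_reduced = selector.fit_transform(X)
# >>> selector.variances_  # per-feature variances learned from X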
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/dtypes/concat.py | 6 | 16002 | """
Utility functions related to concat
"""
import numpy as np
import pandas._libs.tslib as tslib
from pandas import compat
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_sparse,
is_datetimetz,
is_datetime64_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_object_dtype,
is_bool_dtype,
is_dtype_equal,
_NS_DTYPE,
_TD_DTYPE)
from pandas.core.dtypes.generic import (
ABCDatetimeIndex, ABCTimedeltaIndex,
ABCPeriodIndex)
def get_dtype_kinds(l):
"""
Parameters
----------
l : list of arrays
Returns
-------
a set of kinds that exist in this list of arrays
"""
typs = set()
for arr in l:
dtype = arr.dtype
if is_categorical_dtype(dtype):
typ = 'category'
elif is_sparse(arr):
typ = 'sparse'
elif is_datetimetz(arr):
# if to_concat contains different tz,
# the result must be object dtype
typ = str(arr.dtype)
elif is_datetime64_dtype(dtype):
typ = 'datetime'
elif is_timedelta64_dtype(dtype):
typ = 'timedelta'
elif is_object_dtype(dtype):
typ = 'object'
elif is_bool_dtype(dtype):
typ = 'bool'
elif is_period_dtype(dtype):
typ = str(arr.dtype)
else:
typ = dtype.kind
typs.add(typ)
return typs
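# Illustrative sketch of get_dtype_kinds (not part of the upstream module):
# >>> get_dtype_kinds([np.array([1, 2]), np.array(['a', 'b'], dtype=object)])
# set(['object', 'i'])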
def _get_series_result_type(result):
"""
return appropriate class of Series concat
input is either dict or array-like
"""
if isinstance(result, dict):
# concat Series with axis 1
if all(is_sparse(c) for c in compat.itervalues(result)):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
else:
from pandas.core.frame import DataFrame
return DataFrame
elif is_sparse(result):
# concat Series with axis 1
from pandas.core.sparse.api import SparseSeries
return SparseSeries
else:
from pandas.core.series import Series
return Series
def _get_frame_result_type(result, objs):
"""
return appropriate class of DataFrame-like concat
if any block is SparseBlock, return SparseDataFrame
otherwise, return 1st obj
"""
if any(b.is_sparse for b in result.blocks):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
else:
return objs[0]
def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays, each of which has a single
'normalized' dtype (for example, if it's object, then it is
non-datetimelike), and provide a combined dtype for the resulting array
that preserves the overall dtype if possible
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
try:
return x.shape[axis] > 0
except Exception:
return True
nonempty = [x for x in to_concat if is_nonempty(x)]
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if 'category' in typs:
# this must be prior to _concat_datetime,
# to support Categorical + datetime-like
return _concat_categorical(to_concat, axis=axis)
elif _contains_datetime or 'timedelta' in typs or _contains_period:
return _concat_datetime(to_concat, axis=axis, typs=typs)
# these are mandated to handle empties as well
elif 'sparse' in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
if not nonempty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
# cast this to float)
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
if (not len(typs - set(['i', 'u', 'f'])) or
not len(typs - set(['bool', 'i', 'u']))):
# let numpy coerce
pass
else:
# coerce to object
to_concat = [x.astype('object') for x in to_concat]
return np.concatenate(to_concat, axis=axis)
def _concat_categorical(to_concat, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : int
Axis to provide concatenation. In the current implementation this is
always 0, e.g. we only have 1D categoricals
Returns
-------
Categorical
A single array, preserving the combined dtypes
"""
def _concat_asobject(to_concat):
to_concat = [x.get_values() if is_categorical_dtype(x.dtype)
else x.ravel() for x in to_concat]
res = _concat_compat(to_concat)
if axis == 1:
return res.reshape(1, len(res))
else:
return res
# we could have object blocks and categoricals here
# if we only have a single categoricals then combine everything
# else its a non-compat categorical
categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]
# validate the categories
if len(categoricals) != len(to_concat):
pass
else:
# when all categories are identical
first = to_concat[0]
if all(first.is_dtype_equal(other) for other in to_concat[1:]):
return union_categoricals(categoricals)
return _concat_asobject(to_concat)
def union_categoricals(to_union, sort_categories=False, ignore_order=False):
"""
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order: boolean, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
.. versionadded:: 0.20.0
Returns
-------
result : Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
"""
from pandas import Index, Categorical, CategoricalIndex, Series
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
def _maybe_unwrap(x):
if isinstance(x, (CategoricalIndex, Series)):
return x.values
elif isinstance(x, Categorical):
return x
else:
raise TypeError("all components to combine must be Categorical")
to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
for other in to_union[1:]):
raise TypeError("dtype of categories must be the same")
ordered = False
if all(first.is_dtype_equal(other) for other in to_union[1:]):
# identical categories - fastpath
categories = first.categories
ordered = first.ordered
new_codes = np.concatenate([c.codes for c in to_union])
if sort_categories and not ignore_order and ordered:
raise TypeError("Cannot use sort_categories=True with "
"ordered Categoricals")
if sort_categories and not categories.is_monotonic_increasing:
categories = categories.sort_values()
indexer = categories.get_indexer(first.categories)
from pandas.core.algorithms import take_1d
new_codes = take_1d(indexer, new_codes, fill_value=-1)
elif ignore_order or all(not c.ordered for c in to_union):
# different categories - union and recode
cats = first.categories.append([c.categories for c in to_union[1:]])
categories = Index(cats.unique())
if sort_categories:
categories = categories.sort_values()
new_codes = []
for c in to_union:
if len(c.categories) > 0:
indexer = categories.get_indexer(c.categories)
from pandas.core.algorithms import take_1d
new_codes.append(take_1d(indexer, c.codes, fill_value=-1))
else:
# must be all NaN
new_codes.append(c.codes)
new_codes = np.concatenate(new_codes)
else:
# ordered - to show a proper error message
if all(c.ordered for c in to_union):
msg = ("to union ordered Categoricals, "
"all categories must be the same")
raise TypeError(msg)
else:
raise TypeError('Categorical.ordered must be the same')
if ignore_order:
ordered = False
return Categorical(new_codes, categories=categories, ordered=ordered,
fastpath=True)
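# Illustrative sketch of union_categoricals (mirrors the behaviour documented
# above; the category values are hypothetical):
# >>> from pandas import Categorical
# >>> a = Categorical(["b", "c"])
# >>> b = Categorical(["a", "b"])
# >>> union_categoricals([a, b])
# [b, c, a, b]
# Categories (3, object): [b, c, a]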
def _concat_datetime(to_concat, axis=0, typs=None):
"""
provide concatenation of a datetimelike array of arrays, each of which is a
single M8[ns], datetime64[ns, tz] or m8[ns] dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
def convert_to_pydatetime(x, axis):
# coerce to an object dtype
# if dtype is of datetimetz or timezone
if x.dtype.kind == _NS_DTYPE.kind:
if getattr(x, 'tz', None) is not None:
x = x.asobject.values
else:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(),
box=True)
x = x.reshape(shape)
elif x.dtype == _TD_DTYPE:
shape = x.shape
x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel(), box=True)
x = x.reshape(shape)
if axis == 1:
x = np.atleast_2d(x)
return x
if typs is None:
typs = get_dtype_kinds(to_concat)
# must be single dtype
if len(typs) == 1:
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if _contains_datetime:
if 'datetime' in typs:
new_values = np.concatenate([x.view(np.int64) for x in
to_concat], axis=axis)
return new_values.view(_NS_DTYPE)
else:
# when to_concat has different tz, len(typs) > 1.
# thus no need to care
return _concat_datetimetz(to_concat)
elif 'timedelta' in typs:
new_values = np.concatenate([x.view(np.int64) for x in to_concat],
axis=axis)
return new_values.view(_TD_DTYPE)
elif _contains_period:
# PeriodIndex must be handled by PeriodIndex,
# Thus can't meet this condition ATM
# Must be changed when we adding PeriodDtype
raise NotImplementedError
# need to coerce to object
to_concat = [convert_to_pydatetime(x, axis) for x in to_concat]
return np.concatenate(to_concat, axis=axis)
def _concat_datetimetz(to_concat, name=None):
"""
concat DatetimeIndex with the same tz
all inputs must be DatetimeIndex
it is used in DatetimeIndex.append also
"""
# do not pass tz to set because tzlocal cannot be hashed
if len(set([str(x.dtype) for x in to_concat])) != 1:
raise ValueError('to_concat must have the same tz')
tz = to_concat[0].tz
# no need to localize because internal repr will not be changed
new_values = np.concatenate([x.asi8 for x in to_concat])
return to_concat[0]._simple_new(new_values, tz=tz, name=name)
def _concat_index_asobject(to_concat, name=None):
"""
concat all inputs as object. DatetimeIndex, TimedeltaIndex and
PeriodIndex are converted to object dtype before concatenation
"""
klasses = ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex
to_concat = [x.asobject if isinstance(x, klasses) else x
for x in to_concat]
from pandas import Index
self = to_concat[0]
attribs = self._get_attributes_dict()
attribs['name'] = name
to_concat = [x._values if isinstance(x, Index) else x
for x in to_concat]
return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
def _concat_sparse(to_concat, axis=0, typs=None):
"""
provide concatenation of a sparse/dense array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
from pandas.core.sparse.array import SparseArray, _make_index
def convert_sparse(x, axis):
# coerce to native type
if isinstance(x, SparseArray):
x = x.get_values()
x = x.ravel()
if axis > 0:
x = np.atleast_2d(x)
return x
if typs is None:
typs = get_dtype_kinds(to_concat)
if len(typs) == 1:
# concat input as it is if all inputs are sparse
# and have the same fill_value
fill_values = set(c.fill_value for c in to_concat)
if len(fill_values) == 1:
sp_values = [c.sp_values for c in to_concat]
indexes = [c.sp_index.to_int_index() for c in to_concat]
indices = []
loc = 0
for idx in indexes:
indices.append(idx.indices + loc)
loc += idx.length
sp_values = np.concatenate(sp_values)
indices = np.concatenate(indices)
sp_index = _make_index(loc, indices, kind=to_concat[0].sp_index)
return SparseArray(sp_values, sparse_index=sp_index,
fill_value=to_concat[0].fill_value)
# input may be sparse / dense mixed and may have different fill_value
# input must contain sparse at least 1
sparses = [c for c in to_concat if is_sparse(c)]
fill_values = [c.fill_value for c in sparses]
sp_indexes = [c.sp_index for c in sparses]
# densify and regular concat
to_concat = [convert_sparse(x, axis) for x in to_concat]
result = np.concatenate(to_concat, axis=axis)
if not len(typs - set(['sparse', 'f', 'i'])):
# sparsify if inputs are sparse and dense numerics
# first sparse input's fill_value and SparseIndex is used
result = SparseArray(result.ravel(), fill_value=fill_values[0],
kind=sp_indexes[0])
else:
# coerce to object if needed
result = result.astype('object')
return result
| mit |
jseabold/statsmodels | statsmodels/tsa/statespace/tests/test_impulse_responses.py | 3 | 26272 | """
Tests for impulse responses of time series
Author: Chad Fulton
License: Simplified-BSD
"""
import warnings
import numpy as np
import pandas as pd
from scipy.stats import ortho_group
import pytest
from numpy.testing import assert_, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.statespace import (mlemodel, sarimax, structural, varmax,
dynamic_factor)
def test_sarimax():
# AR(1)
mod = sarimax.SARIMAX([0], order=(1, 0, 0))
phi = 0.5
actual = mod.impulse_responses([phi, 1], steps=10)
desired = np.r_[[phi**i for i in range(11)]]
assert_allclose(actual, desired)
# MA(1)
mod = sarimax.SARIMAX([0], order=(0, 0, 1))
theta = 0.5
actual = mod.impulse_responses([theta, 1], steps=10)
desired = np.r_[1, theta, [0]*9]
assert_allclose(actual, desired)
# ARMA(2, 2) + constant
# Stata:
# webuse lutkepohl2
# arima dln_inc, arima(2, 0, 2)
# irf create irf1, set(irf1) step(10)
# irf table irf
params = [.01928228, -.03656216, .7588994,
.27070341, -.72928328, .01122177**0.5]
mod = sarimax.SARIMAX([0], order=(2, 0, 2), trend='c')
actual = mod.impulse_responses(params, steps=10)
desired = [1, .234141, .021055, .17692, .00951, .133917, .002321, .101544,
-.001951, .077133, -.004301]
assert_allclose(actual, desired, atol=1e-6)
# SARIMAX(1,1,1)x(1,0,1,4) + constant + exog
# Stata:
# webuse lutkepohl2
# gen exog = _n^2
# arima inc exog, arima(1,1,1) sarima(1,0,1,4)
# irf create irf2, set(irf2) step(10)
# irf table irf
params = [.12853289, 12.207156, .86384742, -.71463236,
.81878967, -.9533955, 14.043884**0.5]
exog = np.arange(1, 92)**2
mod = sarimax.SARIMAX(np.zeros(91), order=(1, 1, 1),
seasonal_order=(1, 0, 1, 4), trend='c', exog=exog,
simple_differencing=True)
actual = mod.impulse_responses(params, steps=10)
desired = [1, .149215, .128899, .111349, -.038417, .063007, .054429,
.047018, -.069598, .018641, .016103]
assert_allclose(actual, desired, atol=1e-6)
def test_structural():
steps = 10
# AR(1)
mod = structural.UnobservedComponents([0], autoregressive=1)
phi = 0.5
actual = mod.impulse_responses([1, phi], steps)
desired = np.r_[[phi**i for i in range(steps + 1)]]
assert_allclose(actual, desired)
# ARX(1)
# This is adequately tested in test_simulate.py, since in the time-varying
# case `impulse_responses` just calls `simulate`
# Irregular
mod = structural.UnobservedComponents([0], 'irregular')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Fixed intercept
# (in practice this is a deterministic constant, because an irregular
# component must be added)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = structural.UnobservedComponents([0], 'fixed intercept')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Deterministic constant
mod = structural.UnobservedComponents([0], 'deterministic constant')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Local level
mod = structural.UnobservedComponents([0], 'local level')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, 1)
# Random walk
mod = structural.UnobservedComponents([0], 'random walk')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 1)
# Fixed slope
# (in practice this is a deterministic trend, because an irregular
# component must be added)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = structural.UnobservedComponents([0], 'fixed slope')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Deterministic trend
mod = structural.UnobservedComponents([0], 'deterministic trend')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Local linear deterministic trend
mod = structural.UnobservedComponents(
[0], 'local linear deterministic trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, 1)
# Random walk with drift
mod = structural.UnobservedComponents([0], 'random walk with drift')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 1)
# Local linear trend
mod = structural.UnobservedComponents([0], 'local linear trend')
# - shock the level
actual = mod.impulse_responses([1., 1., 1.], steps)
assert_allclose(actual, 1)
# - shock the trend
actual = mod.impulse_responses([1., 1., 1.], steps, impulse=1)
assert_allclose(actual, np.arange(steps + 1))
# Smooth trend
mod = structural.UnobservedComponents([0], 'smooth trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, np.arange(steps + 1))
# Random trend
mod = structural.UnobservedComponents([0], 'random trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, np.arange(steps + 1))
# Seasonal (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2,
stochastic_seasonal=False)
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Seasonal (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2)
actual = mod.impulse_responses([1., 1.], steps)
desired = np.r_[1, np.tile([-1, 1], steps // 2)]
assert_allclose(actual, desired)
# Cycle (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True)
actual = mod.impulse_responses([1., 1.2], steps)
assert_allclose(actual, 0)
# Cycle (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True,
stochastic_cycle=True)
actual = mod.impulse_responses([1., 1., 1.2], steps=10)
x1 = [np.cos(1.2), np.sin(1.2)]
x2 = [-np.sin(1.2), np.cos(1.2)]
T = np.array([x1, x2])
desired = np.zeros(steps + 1)
states = [1, 0]
for i in range(steps + 1):
desired[i] += states[0]
states = np.dot(T, states)
assert_allclose(actual, desired)
def test_varmax():
steps = 10
# Clear warnings
varmax.__warningregistry__ = {}
# VAR(2) - single series
mod1 = varmax.VARMAX([[0]], order=(2, 0), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses([0.5, 0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual, desired)
# VMA(2) - single series
mod1 = varmax.VARMAX([[0]], order=(0, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(0, 0, 2))
actual = mod1.impulse_responses([0.5, 0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual, desired)
# VARMA(2, 2) - single series
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2))
actual = mod1.impulse_responses([0.5, 0.2, 0.1, -0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 0.1, -0.2, 1], steps)
assert_allclose(actual, desired)
# VARMA(2, 2) + trend - single series
warning = EstimationWarning
match = r'VARMA\(p,q\) models is not'
with pytest.warns(warning, match=match):
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='c')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2), trend='c')
actual = mod1.impulse_responses([10, 0.5, 0.2, 0.1, -0.2, 1], steps)
desired = mod2.impulse_responses([10, 0.5, 0.2, 0.1, -0.2, 1], steps)
assert_allclose(actual, desired)
# VAR(2) + constant
# Stata:
# webuse lutkepohl2
# var dln_inv dln_inc, lags(1/2)
# irf create irf3, set(irf3) step(10)
# irf table irf
# irf table oirf
params = [-.00122728, .01503679,
-.22741923, .71030531, -.11596357, .51494891,
.05974659, .02094608, .05635125, .08332519,
.04297918, .00159473, .01096298]
irf_00 = [1, -.227419, -.021806, .093362, -.001875, -.00906, .009605,
.001323, -.001041, .000769, .00032]
irf_01 = [0, .059747, .044015, -.008218, .007845, .004629, .000104,
.000451, .000638, .000063, .000042]
irf_10 = [0, .710305, .36829, -.065697, .084398, .043038, .000533,
.005755, .006051, .000548, .000526]
irf_11 = [1, .020946, .126202, .066419, .028735, .007477, .009878,
.003287, .001266, .000986, .0005]
oirf_00 = [0.042979, -0.008642, -0.00035, 0.003908, 0.000054, -0.000321,
0.000414, 0.000066, -0.000035, 0.000034, 0.000015]
oirf_01 = [0.001595, 0.002601, 0.002093, -0.000247, 0.000383, 0.000211,
0.00002, 0.000025, 0.000029, 4.30E-06, 2.60E-06]
oirf_10 = [0, 0.007787, 0.004037, -0.00072, 0.000925, 0.000472, 5.80E-06,
0.000063, 0.000066, 6.00E-06, 5.80E-06]
oirf_11 = [0.010963, 0.00023, 0.001384, 0.000728, 0.000315, 0.000082,
0.000108, 0.000036, 0.000014, 0.000011, 5.50E-06]
mod = varmax.VARMAX([[0, 0]], order=(2, 0), trend='c')
# IRFs
actual = mod.impulse_responses(params, steps, impulse=0)
assert_allclose(actual, np.c_[irf_00, irf_01], atol=1e-6)
actual = mod.impulse_responses(params, steps, impulse=1)
assert_allclose(actual, np.c_[irf_10, irf_11], atol=1e-6)
# Orthogonalized IRFs
actual = mod.impulse_responses(params, steps, impulse=0,
orthogonalized=True)
assert_allclose(actual, np.c_[oirf_00, oirf_01], atol=1e-6)
actual = mod.impulse_responses(params, steps, impulse=1,
orthogonalized=True)
assert_allclose(actual, np.c_[oirf_10, oirf_11], atol=1e-6)
# VARMA(2, 2) + trend + exog
# TODO: This is just a smoke test
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = varmax.VARMAX(
np.random.normal(size=(steps, 2)), order=(2, 2), trend='c',
exog=np.ones(steps), enforce_stationarity=False,
enforce_invertibility=False)
mod.impulse_responses(mod.start_params, steps)
def test_dynamic_factor():
steps = 10
exog = np.random.normal(size=steps)
# DFM: 2 series, AR(2) factor
mod1 = dynamic_factor.DynamicFactor([[0, 0]], k_factors=1, factor_order=2)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses([-0.9, 0.8, 1., 1., 0.5, 0.2], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual[:, 0], -0.9 * desired)
assert_allclose(actual[:, 1], 0.8 * desired)
# DFM: 2 series, AR(2) factor, exog
mod1 = dynamic_factor.DynamicFactor(np.zeros((steps, 2)), k_factors=1,
factor_order=2, exog=exog)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses(
[-0.9, 0.8, 5, -2, 1., 1., 0.5, 0.2], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual[:, 0], -0.9 * desired)
assert_allclose(actual[:, 1], 0.8 * desired)
# DFM, 3 series, VAR(2) factor, exog, error VAR
# TODO: This is just a smoke test
mod = dynamic_factor.DynamicFactor(np.random.normal(size=(steps, 3)),
k_factors=2, factor_order=2, exog=exog,
error_order=2, error_var=True,
enforce_stationarity=False)
mod.impulse_responses(mod.start_params, steps)
def test_time_varying_ssm():
# Create an ad-hoc time-varying model
mod = sarimax.SARIMAX([0] * 11, order=(1, 0, 0))
mod.update([0.5, 1.0])
T = np.zeros((1, 1, 11))
T[..., :5] = 0.5
T[..., 5:] = 0.2
mod['transition'] = T
irfs = mod.ssm.impulse_responses()
desired = np.cumprod(np.r_[1, [0.5] * 4, [0.2] * 5]).reshape(10, 1)
assert_allclose(irfs, desired)
class TVSS(mlemodel.MLEModel):
"""
Time-varying state space model for testing
This creates a state space model with randomly generated time-varying
system matrices. When used in a test, that test should use
`reset_randomstate` to ensure consistent test runs.
"""
def __init__(self, endog, _k_states=None):
k_states = 2
k_posdef = 2
# Allow subclasses to add additional states
if _k_states is None:
_k_states = k_states
super(TVSS, self).__init__(endog, k_states=_k_states,
k_posdef=k_posdef, initialization='diffuse')
self['obs_intercept'] = np.random.normal(
size=(self.k_endog, self.nobs))
self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
self['transition'] = np.zeros(
(self.k_states, self.k_states, self.nobs))
self['selection'] = np.zeros(
(self.k_states, self.ssm.k_posdef, self.nobs))
self['design', :, :k_states, :] = np.random.normal(
size=(self.k_endog, k_states, self.nobs))
# For the transition matrices, enforce eigenvalues not too far outside
# unit circle. Otherwise, the random draws will often lead to large
# eigenvalues that cause the covariance matrices to blow up to huge
# values during long periods of missing data, which leads to numerical
# problems and essentially spurious test failures
D = [np.diag(d)
for d in np.random.uniform(-1.1, 1.1, size=(self.nobs, k_states))]
Q = ortho_group.rvs(k_states, size=self.nobs)
self['transition', :k_states, :k_states, :] = (
Q @ D @ Q.transpose(0, 2, 1)).transpose(1, 2, 0)
self['selection', :k_states, :, :] = np.random.normal(
size=(k_states, self.ssm.k_posdef, self.nobs))
# Need to make sure the covariances are positive definite
H05 = np.random.normal(size=(self.k_endog, self.k_endog, self.nobs))
Q05 = np.random.normal(
size=(self.ssm.k_posdef, self.ssm.k_posdef, self.nobs))
H = np.zeros_like(H05)
Q = np.zeros_like(Q05)
for t in range(self.nobs):
H[..., t] = np.dot(H05[..., t], H05[..., t].T)
Q[..., t] = np.dot(Q05[..., t], Q05[..., t].T)
self['obs_cov'] = H
self['state_cov'] = Q
def clone(self, endog, exog=None, **kwargs):
mod = self.__class__(endog, **kwargs)
for key in self.ssm.shapes.keys():
if key in ['obs', 'state_intercept']:
continue
n = min(self.nobs, mod.nobs)
mod[key, ..., :n] = self.ssm[key, ..., :n]
return mod
def test_time_varying_in_sample(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute the max number of in-sample IRFs
irfs = mod.impulse_responses([], steps=mod.nobs - 1)
# Compute the same thing, but now with explicit anchor argument
irfs_anchor = mod.impulse_responses([], steps=mod.nobs - 1, anchor=0)
# Cumulative IRFs
cirfs = mod.impulse_responses([], steps=mod.nobs - 1, cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses([], steps=mod.nobs - 1, orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses([], steps=mod.nobs - 1, cumulative=True,
orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., 0]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - 1, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - 1, 2)) * np.nan
tmp = R[..., 0]
for i in range(1, mod.nobs):
desired_irfs[i - 1] = Z[:, :, i].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i].dot(tmp)
assert_allclose(irfs, desired_irfs)
assert_allclose(irfs_anchor, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute all in-sample IRFs and also one out-of-sample IRF
new_Z = np.random.normal(size=mod['design', :, :, -1].shape)
new_T = np.random.normal(size=mod['transition', :, :, -1].shape)
irfs = mod.impulse_responses(
[], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Compute the same thing, but now with explicit anchor argument
irfs_anchor = mod.impulse_responses(
[], steps=mod.nobs, anchor=0, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Cumulative IRFs
cirfs = mod.impulse_responses([], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None],
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses([], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None],
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None], cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., 0]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs, 2)) * np.nan
tmp = R[..., 0]
for i in range(1, mod.nobs):
desired_irfs[i - 1] = Z[:, :, i].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i].dot(tmp)
desired_irfs[mod.nobs - 1] = new_Z.dot(tmp)[:, 0]
desired_oirfs[mod.nobs - 1] = new_Z.dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(irfs_anchor, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_in_sample_anchored(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute the max number of in-sample IRFs
anchor = 2
irfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor)
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., anchor]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - anchor - 1, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - anchor - 1, 2)) * np.nan
tmp = R[..., anchor]
for i in range(1, mod.nobs - anchor):
desired_irfs[i - 1] = Z[:, :, i + anchor].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i + anchor].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i + anchor].dot(tmp)
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample_anchored(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute all in-sample IRFs and also one out-of-sample IRF
anchor = 2
new_Z = mod['design', :, :, -1]
new_T = mod['transition', :, :, -1]
irfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., anchor]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - anchor, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - anchor, 2)) * np.nan
tmp = R[..., anchor]
for i in range(1, mod.nobs - anchor):
desired_irfs[i - 1] = Z[:, :, i + anchor].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i + anchor].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i + anchor].dot(tmp)
desired_irfs[mod.nobs - anchor - 1] = new_Z.dot(tmp)[:, 0]
desired_oirfs[mod.nobs - anchor - 1] = new_Z.dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample_anchored_end(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Cannot compute the any in-sample IRFs when anchoring at the end
with pytest.raises(ValueError, match='Model has time-varying'):
mod.impulse_responses([], steps=2, anchor='end')
# Compute two out-of-sample IRFs
new_Z = np.random.normal(size=mod['design', :, :, -2:].shape)
new_T = np.random.normal(size=mod['transition', :, :, -2:].shape)
irfs = mod.impulse_responses([], steps=2, anchor='end',
design=new_Z, transition=new_T)
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
cumulative=True, orthogonalized=True)
# Compute IRFs manually
R = mod['selection']
Q = mod['state_cov', ..., -1]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((2, 2)) * np.nan
desired_oirfs = np.zeros((2, 2)) * np.nan
# desired[0] = 0
# Z_{T+1} R_T
tmp = R[..., -1]
desired_irfs[0] = new_Z[:, :, 0].dot(tmp)[:, 0]
desired_oirfs[0] = new_Z[:, :, 0].dot(tmp).dot(L)[:, 0]
# T_{T+1} R_T
tmp = new_T[..., 0].dot(tmp)
# Z_{T+2} T_{T+1} R_T
desired_irfs[1] = new_Z[:, :, 1].dot(tmp)[:, 0]
desired_oirfs[1] = new_Z[:, :, 1].dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_pandas_univariate_rangeindex():
# Impulse responses have RangeIndex
endog = pd.Series(np.zeros(1))
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
actual = res.impulse_responses(2)
desired = pd.Series([1., 0.5, 0.25])
assert_allclose(res.impulse_responses(2), desired)
assert_(actual.index.equals(desired.index))
def test_pandas_univariate_dateindex():
# Impulse responses still have RangeIndex (i.e. aren't wrapped with dates)
ix = pd.date_range(start='2000', periods=1, freq='M')
endog = pd.Series(np.zeros(1), index=ix)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
actual = res.impulse_responses(2)
desired = pd.Series([1., 0.5, 0.25])
assert_allclose(res.impulse_responses(2), desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_rangeindex():
# Impulse responses have RangeIndex
endog = pd.DataFrame(np.zeros((1, 2)))
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
actual = res.impulse_responses(2)
desired = pd.DataFrame([[1., 0.5, 0.25], [0., 0., 0.]]).T
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_dateindex():
# Impulse responses still have RangeIndex (i.e. aren't wrapped with dates)
ix = pd.date_range(start='2000', periods=1, freq='M')
endog = pd.DataFrame(np.zeros((1, 2)), index=ix)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
actual = res.impulse_responses(2)
desired = pd.DataFrame([[1., 0.5, 0.25], [0., 0., 0.]]).T
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_anchor():
# Test that anchor with dates works
ix = pd.date_range(start='2000', periods=10, freq='M')
endog = pd.DataFrame(np.zeros((10, 2)), index=ix)
mod = TVSS(endog)
res = mod.filter([])
desired = res.impulse_responses(2, anchor=1)
# Anchor to date
actual = res.impulse_responses(2, anchor=ix[1])
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
# Anchor to negative index
actual = res.impulse_responses(2, anchor=-9)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
| bsd-3-clause |
nateGeorge/IDmyDog | process_ims/other/2d_haralick_map.py | 1 | 3493 | from __future__ import print_function
import pandas as pd
import pickle as pk
import cv2
import os
import re
import progressbar
import imutils
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mahotas.features import haralick
import json
from sklearn.decomposition import PCA
plt.style.use('seaborn-dark')
def get_fg_bg_rects(fg):
b, g, r, a = cv2.split(fg)
h, w = fg.shape[:2]
h -= 1
w -= 1 # to avoid indexing problems
rectDims = [10, 10] # h, w of rectangles
hRects = h / rectDims[0]
wRects = w / rectDims[1]
fgRects = []
bgRects = []
for i in range(wRects):
for j in range(hRects):
pt1 = (i * rectDims[0], j * rectDims[1])
pt2 = ((i + 1) * rectDims[0], (j + 1) * rectDims[1])
# alpha is 255 over the part of the dog
if a[pt1[1], pt1[0]] == 255 and a[pt2[1], pt2[0]] == 255:
fgRects.append([pt1, pt2])
#cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2) # for debugging
elif a[pt1[1], pt1[0]] == 0 and a[pt2[1], pt2[0]] == 0:
bgRects.append([pt1, pt2])
#cv2.rectangle(bgcp, pt1, pt2, [0, 0, 255], 2)
return fgRects, bgRects
def get_avg_hara(im, rects):
# returns the haralick texture averaged over all rectangles in an image
if len(rects)==0:
return None
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = 0
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara += haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0)
hara /= (len(rects))
return hara
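# Usage sketch (the file name and rectangle coordinates are hypothetical):
# im = cv2.imread('dog_fg.png')
# rects = [[(0, 0), (10, 10)], [(10, 10), (20, 20)]]
# avg_texture = get_avg_hara(im, rects)  # 13-element mean Haralick feature vector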
def make_hara_map(im, rects):
# draws heatmap of haralick texture PCA dim1 variance
if len(rects)==0:
return None
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = []
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara.append(pcaFG.transform(haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0).reshape(1, -1)))
hara = np.array(hara)
haraMean = np.mean(hara, axis=0)
haraStd = np.std(hara, axis=0)
haraMins = np.min(hara, axis=0)
haraMaxs = np.max(hara, axis=0)
norm = (haraMaxs-haraMins)
copy = im.copy()
copy = cv2.cvtColor(copy, cv2.COLOR_BGRA2RGBA)
im = cv2.cvtColor(im, cv2.COLOR_BGRA2RGBA)
for i in range(hara.shape[0]):
brightScale = 255*(hara[i] - haraMins)/norm
bright = brightScale[0][0]
r = rects[i]
cv2.rectangle(copy, r[0], r[1], [0, bright, 0, 255], -1)
f, axarr = plt.subplots(2, 1)
axarr[0].imshow(copy)
axarr[1].imshow(im)
plt.show()
# load configuration
with open('../../config.json', 'rb') as f:
config = json.load(f)
mainImPath = config['image_dir']
pDir = config['pickle_dir']
pcaFG = pk.load(open(pDir + 'pcaFG.pk', 'rb'))
bb = pk.load(open(pDir + 'pDogs-bounding-boxes-clean.pd.pk', 'rb'))
bb.dropna(inplace=True)
# do something like sorted(bb.breed.unique().tolist())[50:] to check another breed
for breed in sorted(bb.breed.unique().tolist())[50:]:
print('breed:', breed)
cropDir = mainImPath + breed + '/grabcut/'
fgDir = cropDir + 'fg/'
fgFiles = os.listdir(fgDir)
for fi in fgFiles:
try:
fg = cv2.imread(fgDir + fi, -1) # -1 tells it to load alpha channel
except Exception as err:
print('exception:', err)
continue
fgRects, bgRects = get_fg_bg_rects(fg)
make_hara_map(fg, fgRects)
| mit |
joegomes/deepchem | examples/binding_pockets/binding_pocket_datasets.py | 9 | 6311 | """
PDBBind binding pocket dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
import shutil
import time
import re
from rdkit import Chem
import deepchem as dc
def compute_binding_pocket_features(pocket_featurizer, ligand_featurizer,
pdb_subdir, pdb_code, threshold=.3):
"""Compute features for a given complex"""
protein_file = os.path.join(pdb_subdir, "%s_protein.pdb" % pdb_code)
ligand_file = os.path.join(pdb_subdir, "%s_ligand.sdf" % pdb_code)
ligand_mol2 = os.path.join(pdb_subdir, "%s_ligand.mol2" % pdb_code)
# Extract active site
active_site_box, active_site_atoms, active_site_coords = (
dc.dock.binding_pocket.extract_active_site(
protein_file, ligand_file))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = ligand_featurizer.featurize([mol])
# Featurize pocket
finder = dc.dock.ConvexHullPocketFinder()
pockets, pocket_atoms, pocket_coords = finder.find_pockets(protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = dc.feat.BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
# Compute labels for pockets
labels = np.zeros(n_pockets)
pocket_atoms[active_site_box] = active_site_atoms
for ind, pocket in enumerate(pockets):
overlap = dc.dock.binding_pocket.compute_overlap(
pocket_atoms, active_site_box, pocket)
if overlap > threshold:
labels[ind] = 1
else:
labels[ind] = 0
return features, labels
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe"""
# Some complexes have labels but no PDB files. Filter these manually
missing_pdbs = ["1d2v", "1jou", "1s8j", "1cam", "4mlt", "4o7d"]
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
# Some of the ligand-names are of form (FMN ox). Use regex
# to merge into form (FMN-ox)
p = re.compile('\(([^\)\s]*) ([^\)\s]*)\)')
line = p.sub('(\\1-\\2)', line)
elts = line.split()
# Filter if missing PDB files
if elts[0] in missing_pdbs:
continue
contents.append(elts)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
def featurize_pdbbind_pockets(data_dir=None, subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["active-site"]
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, "%s_pockets" % (subset))
if os.path.exists(data_dir):
return dc.data.DiskDataset(data_dir), tasks
pdbbind_dir = os.path.join(current_dir, "../pdbbind/v2015")
# Load PDBBind dataset
if subset == "core":
labels_file = os.path.join(pdbbind_dir, "INDEX_core_data.2013")
elif subset == "refined":
labels_file = os.path.join(pdbbind_dir, "INDEX_refined_data.2015")
elif subset == "full":
labels_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
else:
raise ValueError("Only core, refined, and full subsets supported.")
print("About to load contents.")
if not os.path.exists(labels_file):
raise ValueError("Run ../pdbbind/get_pdbbind.sh to download dataset.")
contents_df = load_pdbbind_labels(labels_file)
ids = contents_df["PDB code"].values
y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
# Define featurizers
pocket_featurizer = dc.feat.BindingPocketFeaturizer()
ligand_featurizer = dc.feat.CircularFingerprint(size=1024)
# Featurize Dataset
all_features = []
all_labels = []
missing_pdbs = []
all_ids = []
time1 = time.time()
for ind, pdb_code in enumerate(ids):
print("Processing complex %d, %s" % (ind, str(pdb_code)))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
if not os.path.exists(pdb_subdir):
print("%s is missing!" % pdb_subdir)
missing_pdbs.append(pdb_subdir)
continue
features, labels = compute_binding_pocket_features(
pocket_featurizer, ligand_featurizer, pdb_subdir, pdb_code)
if features is None:
print("Featurization failed!")
continue
all_features.append(features)
all_labels.append(labels)
    # use a separate name so the `ids` array being iterated over isn't shadowed
    pocket_ids = np.array(["%s%d" % (pdb_code, i) for i in range(len(labels))])
    all_ids.append(pocket_ids)
time2 = time.time()
print("TIMING: PDBBind Pocket Featurization took %0.3f s" % (time2-time1))
X = np.vstack(all_features)
y = np.concatenate(all_labels)
w = np.ones_like(y)
ids = np.concatenate(all_ids)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids, data_dir=data_dir)
return dataset, tasks
def load_pdbbind_pockets(split="index", subset="core"):
"""Load PDBBind datasets. Does not do train/test split"""
dataset, tasks = featurize_pdbbind_pockets(subset=subset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter()}
splitter = splitters[split]
########################################################### DEBUG
print("dataset.X.shape")
print(dataset.X.shape)
print("dataset.y.shape")
print(dataset.y.shape)
print("dataset.w.shape")
print(dataset.w.shape)
print("dataset.ids.shape")
print(dataset.ids.shape)
########################################################### DEBUG
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
return tasks, (train, valid, test), transformers
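if __name__ == "__main__":
  # Minimal usage sketch (not part of the original module). It assumes the PDBBind
  # "core" subset has already been downloaded via ../pdbbind/get_pdbbind.sh.
  tasks, (train, valid, test), transformers = load_pdbbind_pockets(
      split="index", subset="core")
  print("train/valid/test sizes:", train.y.shape[0], valid.y.shape[0], test.y.shape[0])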
| mit |
openbermuda/karmapi | karmapi/nzquake.py | 1 | 1633 | """ The data is available from Geonet, the official source of New
Zealand earthquake hazard data:
http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv
Geonet Data policy
==================
All data and images are made available free of charge through the
GeoNet project to facilitate research into hazards and assessment of
risk. GeoNet is sponsored by the New Zealand Government through its
agencies: Earthquake Commission (EQC), GNS Science and Land
Information New Zealand (LINZ). The use of data or images is subject
to the following conditions:
Users are requested to acknowledge the GeoNet project sponsors as the
source of the data or images. (Suggested text: We acknowledge the New
Zealand GeoNet project and its sponsors EQC, GNS Science and LINZ, for
providing data/images used in this study.)
The GeoNet project sponsors accept no liability for any loss or
damage, direct or indirect, resulting from the use of the data or
images provided. The GeoNet project sponsors do not make any
representation in respect of the information's accuracy, completeness
or fitness for any particular purpose.
"""
URL = "http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv"
from pathlib import Path
import requests
import karmapi
import pandas
def get(path):
path = Path(path)
r = requests.get(URL)
path.write_bytes(r.content)
def datefix(df):
tt = df.origintime.apply(lambda x: x[:19])
df.index = pandas.to_datetime(tt)
return df
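if __name__ == '__main__':
    # Usage sketch: fetch the catalogue to a file of your choosing, then load it
    # with pandas and index it by origin time (column names as served by Geonet).
    get('quakes.csv')
    quakes = datefix(pandas.read_csv('quakes.csv'))
    print(quakes.index.min(), quakes.index.max(), len(quakes))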
| gpl-3.0 |
eduardoftoliveira/oniomMacGyver | scripts/draw_PES.py | 2 | 3836 | #!/usr/bin/env python
import matplotlib as mpl
from matplotlib import pyplot as plt
import argparse
def add_adiabatic_map_to_axis(axis, style, energies, color):
""" add single set of energies to plot """
# Energy horizontal decks
x = style['START']
for energy in energies:
axis.plot([x, x+style['WIDTH']], [energy, energy],
'-%s' % color, linewidth=2)
x += style['SPACING']
# Connect steps
x = style['START']
for i in range(1, len(energies)):
x1 = x + style['WIDTH']
x2 = x + style['SPACING']
y1 = energies[i-1]
y2 = energies[i]
axis.plot([x1, x2], [y1, y2], '-%s' % color)
x += style['SPACING']
def getargs():
parser = argparse.ArgumentParser(description="""
Make plot from user provided energies.
Can read multiple sets of energies.""",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-o', '--output',
default='PES.svg',
help='File name of output figure')
parser.add_argument('--dpi',
default=300, type=int,
help='Resolution for bitmaps')
parser.add_argument('-e', '--energies',
nargs='+', type=float, action='append',
help='Energies for any number of stationary points')
parser.add_argument('-l', '--labels', nargs='+',
help='Name of stationary points')
parser.add_argument('-c', '--colors', nargs='+',
help='Color codes')
args = parser.parse_args()
# less colors than PES ? add 'k'
if args.colors:
missing_colors = len(args.energies) - len(args.colors)
missing_colors = (missing_colors > 0) * missing_colors
args.colors += 'k' * missing_colors
return args
def makelabels(N):
""" Make automatic labels: TS1, INT1, TS2, etc.."""
labels = ['R']
    n_ts = N // 2
    n_i = (N - 2) // 2
n_i = n_i * (n_i > 0) # becomes zero if negative
for i in range(n_ts + n_i):
if i % 2:
labels.append('INT%d' % (i/2+1))
else:
labels.append('TS%d' % (i/2+1))
if N % 2 and N >= 3:
labels.append('P')
return labels
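# quick check of the labelling scheme: makelabels(5) gives ['R', 'TS1', 'INT1', 'TS2', 'P']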
def configure_axis_limits(axis, style, energies):
# Appearance
ymin, ymax = float('+inf'), float('-inf')
maxlen = 0
for energy_set in energies:
ymin = min(ymin, min(energy_set))
ymax = max(ymax, max(energy_set))
maxlen = max(len(energy_set), maxlen)
yrange = ymax-ymin
axis.set_ylim(ymin-0.1*yrange, ymax+0.1*yrange)
xmax = style['START']*2 + style['WIDTH'] + (maxlen-1)*style['SPACING']
axis.set_xlim(0, xmax)
axis.set_xticks([
style['START']+i*style['SPACING']+style['WIDTH']/2.0 for i in range(maxlen)])
return maxlen
def main():
# get user input
args = getargs()
# important style features
style = {
'WIDTH' : 4, # width of horizontal bars
'SPACING' : 10, # spacing between center of horizontal bars
'START' : 3 # x-offset from y-axis
}
# Configure Figure
fig = plt.gcf()
fig.set_size_inches(3.3, 2.5)
mpl.rcParams.update({'font.size': 7, 'axes.linewidth':0.5})
plt.subplots_adjust(bottom=.15)
plt.subplots_adjust(left=.15)
plt.ylabel('Energy (kcal/mol)')
plt.xlabel('Reaction coordinate')
ax = fig.gca()
ax.grid(True)
maxlen = configure_axis_limits(ax, style, args.energies)
if not args.labels:
args.labels = makelabels(maxlen)
ax.set_xticklabels(args.labels)
# plot stuff
color = 'k'
for j,energies in enumerate(args.energies):
if args.colors:
color = args.colors[j]
add_adiabatic_map_to_axis(ax, style, energies, color)
plt.savefig(args.output, dpi=args.dpi)
if __name__ == '__main__':
main()
| gpl-3.0 |
sambitgaan/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 32 | 6059 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
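if __name__ == "__main__":
  # Usage sketch (not part of the tutorial): write one actual/predicted pair for a
  # hypothetical gym named "example" to example_out.csv, then close the writer.
  from datetime import datetime
  output = NuPICFileOutput(["example"])
  output.write([datetime(2010, 7, 2)], [21.2], [21.0])
  output.close()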
| agpl-3.0 |
tsherwen/AC_tools | Scripts/2D_GEOSChem_slice_subregion_plotter_example.py | 1 | 2934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDFs files.
NOTES
---
- This is setup for Cly, but many other options (plot/species) are availible
by just updating passed variables/plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
    # (1st argument is the file directory/folder, 2nd is the filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
    # are the spacings right? - if not, just adjust the values below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
# show and save as PDF?
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
| mit |
takluyver/xray | xray/groupby.py | 1 | 12796 | import itertools
from common import ImplementsReduce
from ops import inject_reduce_methods
import variable
import dataset
import numpy as np
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
values, inverse = np.unique(ar, return_inverse=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
groups[g].append(n)
return values, groups
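# e.g. unique_value_groups(['b', 'a', 'b']) returns (array(['a', 'b'], ...), [[1], [0, 2]])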
def peek_at(iterable):
"""Returns the first value from iterable, as well as a new iterable with
the same content as the original iterable
"""
gen = iter(iterable)
peek = gen.next()
return peek, itertools.chain([peek], gen)
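# e.g. first, rest = peek_at(iter('ab')) leaves first == 'a' and list(rest) == ['a', 'b']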
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
XArray.groupby
DataArray.groupby
"""
def __init__(self, obj, group_coord, squeeze=True):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group_coord : DataArray
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
"""
if group_coord.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group_coord` must be 1 dimensional')
self.obj = obj
self.group_coord = group_coord
self.group_dim, = group_coord.dimensions
expected_size = dataset.as_dataset(obj).dimensions[self.group_dim]
if group_coord.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
if group_coord.name in obj.dimensions:
# assume that group_coord already has sorted, unique values
if group_coord.dimensions != (group_coord.name,):
raise ValueError('`group_coord` is required to be a '
'coordinate variable if `group_coord.name` '
'is a dimension in `obj`')
group_indices = np.arange(group_coord.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group_coord
else:
# look through group_coord to find the unique values
unique_values, group_indices = unique_value_groups(group_coord)
# TODO: switch this to using the new DataArray constructor when we
# get around to writing it:
# unique_coord = xary.DataArray(unique_values, name=group_coord.name)
variables = {group_coord.name: (group_coord.name, unique_values)}
unique_coord = dataset.Dataset(variables)[group_coord.name]
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return itertools.izip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.indexed(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dimensions:
concat_dim = self.group_coord
indexers = self.group_indices
else:
concat_dim = self.unique_coord
indexers = np.arange(self.unique_coord.size)
return concat_dim, indexers
@property
def _combine(self):
return type(self.obj).concat
class ArrayGroupBy(GroupBy, ImplementsReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields XArrays without metadata
"""
array = variable.as_variable(self.obj)
# build the new dimensions
if isinstance(self.group_indices[0], int):
# group_dim is squeezed out
dims = tuple(d for d in array.dimensions if d != self.group_dim)
else:
dims = array.dimensions
# slice the data and build the new Arrays directly
indexer = [slice(None)] * array.ndim
group_axis = array.get_axis_num(self.group_dim)
for indices in self.group_indices:
indexer[group_axis] = indices
data = array.values[tuple(indexer)]
yield variable.Variable(dims, data)
def _combine_shortcut(self, applied, concat_dim, indexers):
stacked = variable.Variable.concat(
applied, concat_dim, indexers, shortcut=True)
stacked.attrs.update(self.obj.attrs)
name = self.obj.name
ds = self.obj.dataset.unselect(name)
ds[concat_dim.name] = concat_dim
# remove extraneous dimensions
for dim in self.obj.dimensions:
if dim not in stacked.dimensions:
del ds[dim]
ds[name] = stacked
return ds[name]
def _restore_dim_order(self, stacked, concat_dim):
def lookup_order(dimension):
if dimension == self.group_coord.name:
dimension, = concat_dim.dimensions
if dimension in self.obj.dimensions:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dimensions, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes, indices or other contained arrays) but
only on the data and dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
            Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (func(arr, **kwargs) for arr in grouped)
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, indexers = self._infer_concat_args(applied_example)
if shortcut:
combined = self._combine_shortcut(applied, concat_dim, indexers)
else:
combined = self._combine(applied, concat_dim, indexers)
reordered = self._restore_dim_order(combined, concat_dim)
return reordered
def reduce(self, func, dimension=None, axis=None, shortcut=True,
**kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dimension : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimension for each group item.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dimension, axis, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
_reduce_method_docstring = \
"""Reduce the items in this group by applying `{name}` along some
dimension(s).
Parameters
----------
dimension : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`{name}` is calculated over all dimension for each group item.
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
reduced : {cls}
New {cls} object with `{name}` applied to its data and the
indicated dimension(s) removed.
"""
inject_reduce_methods(ArrayGroupBy)
class DatasetGroupBy(GroupBy):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
applied = [func(ds, **kwargs) for ds in self._iter_grouped()]
concat_dim, indexers = self._infer_concat_args(applied[0])
combined = self._combine(applied, concat_dim, indexers)
return combined
| apache-2.0 |
QuLogic/vispy | examples/basics/plotting/mpl_plot.py | 14 | 1579 | # -*- coding: utf-8 -*-
# vispy: testskip
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Example demonstrating how to use vispy.pyplot, which uses mplexporter
to convert matplotlib commands to vispy draw commands.
Requires matplotlib.
"""
import numpy as np
# You can use either matplotlib or vispy to render this example:
# import matplotlib.pyplot as plt
import vispy.mpl_plot as plt
from vispy.io import read_png, load_data_file
n = 200
freq = 10
fs = 100.
t = np.arange(n) / fs
tone = np.sin(2*np.pi*freq*t)
noise = np.random.RandomState(0).randn(n)
signal = tone + noise
magnitude = np.abs(np.fft.fft(signal))
freqs = np.fft.fftfreq(n, 1. / fs)
flim = n // 2
# Signal
fig = plt.figure()
ax = plt.subplot(311)
ax.imshow(read_png(load_data_file('pyplot/logo.png')))
ax = plt.subplot(312)
ax.plot(t, signal, 'k-')
# Frequency content
ax = plt.subplot(313)
idx = np.argmax(magnitude[:flim])
ax.text(freqs[idx], magnitude[idx], 'Max: %s Hz' % freqs[idx],
verticalalignment='top')
ax.plot(freqs[:flim], magnitude[:flim], 'k-o')
plt.draw()
# NOTE: show() has currently been overwritten to convert to vispy format, so:
# 1. It must be called to show the results, and
# 2. Any plotting commands executed after this will not take effect.
# We are working to remove this limitation.
if __name__ == '__main__':
plt.show(True)
| bsd-3-clause |
waddell/urbansim | urbansim/urbanchoice/mnl.py | 4 | 9002 | """
Number crunching code for multinomial logit.
``mnl_estimate`` and ``mnl_simulate`` especially are used by
``urbansim.models.lcm``.
"""
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import scipy.optimize
import pmat
from pmat import PMAT
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
# right now MNL can only estimate location choice models, where every equation
# is the same
# it might be better to use statsmodels for a non-location-choice problem
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_probs(data, beta, numalts):
logging.debug('start: calculate MNL probabilities')
clamp = data.typ == 'numpy'
utilities = beta.multiply(data)
if numalts == 0:
raise Exception("Number of alternatives is zero")
utilities.reshape(numalts, utilities.size() / numalts)
exponentiated_utility = utilities.exp(inplace=True)
if clamp:
exponentiated_utility.inftoval(1e20)
if clamp:
exponentiated_utility.clamptomin(1e-300)
sum_exponentiated_utility = exponentiated_utility.sum(axis=0)
probs = exponentiated_utility.divide_by_row(
sum_exponentiated_utility, inplace=True)
if clamp:
probs.nantoval(1e-300)
if clamp:
probs.clamptomin(1e-300)
logging.debug('finish: calculate MNL probabilities')
return probs
def get_hessian(derivative):
return np.linalg.inv(np.dot(derivative, np.transpose(derivative)))
def get_standard_error(hessian):
return np.sqrt(np.diagonal(hessian))
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_loglik(beta, data, chosen, numalts, weights=None, lcgrad=False,
stderr=0):
logger.debug('start: calculate MNL log-likelihood')
numvars = beta.size
numobs = data.size() / numvars / numalts
beta = np.reshape(beta, (1, beta.size))
beta = PMAT(beta, data.typ)
probs = mnl_probs(data, beta, numalts)
# lcgrad is the special gradient for the latent class membership model
if lcgrad:
assert weights
gradmat = weights.subtract(probs).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
else:
if not weights:
gradmat = chosen.subtract(probs).reshape(probs.size(), 1)
else:
gradmat = chosen.subtract(probs).multiply_by_row(
weights).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
if stderr:
gradmat = data.multiply_by_row(gradmat.reshape(1, gradmat.size()))
gradmat.reshape(numvars, numalts * numobs)
return get_standard_error(get_hessian(gradmat.get_mat()))
chosen.reshape(numalts, numobs)
if weights is not None:
if probs.shape() == weights.shape():
loglik = ((probs.log(inplace=True)
.element_multiply(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = ((probs.log(inplace=True)
.multiply_by_row(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = (probs.log(inplace=True).element_multiply(
chosen, inplace=True)).sum(axis=1).sum(axis=0)
if loglik.typ == 'numpy':
loglik, gradarr = loglik.get_mat(), gradarr.get_mat().flatten()
else:
loglik = loglik.get_mat()[0, 0]
gradarr = np.reshape(gradarr.get_mat(), (1, gradarr.size()))[0]
logger.debug('finish: calculate MNL log-likelihood')
return -1 * loglik, -1 * gradarr
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True):
"""
Get the probabilities for each chooser choosing between `numalts`
alternatives.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
coeff : 1D array
The model coefficients corresponding to each column in `data`.
numalts : int
The number of alternatives available to each chooser.
GPU : bool, optional
returnprobs : bool, optional
If True, return the probabilities for each chooser/alternative instead
of actual choices.
Returns
-------
probs or choices: 2D array
If `returnprobs` is True the probabilities are a 2D array with a
row for each chooser and columns for each alternative.
"""
logger.debug(
'start: MNL simulation with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
data = np.transpose(data)
coeff = np.reshape(np.array(coeff), (1, len(coeff)))
data, coeff = PMAT(data, atype), PMAT(coeff, atype)
probs = mnl_probs(data, coeff, numalts)
if returnprobs:
return np.transpose(probs.get_mat())
# convert to cpu from here on - gpu doesn't currently support these ops
if probs.typ == 'cuda':
probs = PMAT(probs.get_mat())
probs = probs.cumsum(axis=0)
r = pmat.random(probs.size() / numalts)
choices = probs.subtract(r, inplace=True).firstpositive(axis=0)
logger.debug('finish: MNL simulation')
return choices.get_mat()
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3),
weights=None, lcgrad=False, beta=None):
"""
Calculate coefficients of the MNL model.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
chosen : 2D array
This boolean array has a row for each chooser and a column for each
alternative. The column ordering for alternatives is expected to be
the same as their row ordering in the `data` array.
A one (True) indicates which alternative each chooser has chosen.
numalts : int
The number of alternatives.
GPU : bool, optional
coeffrange : tuple of floats, optional
Limits of (min, max) to which coefficients are clipped.
weights : ndarray, optional
lcgrad : bool, optional
beta : 1D array, optional
Any initial guess for the coefficients.
Returns
-------
log_likelihood : dict
Dictionary of log-likelihood values describing the quality of
the model fit.
fit_parameters : pandas.DataFrame
Table of fit parameters with columns 'Coefficient', 'Std. Error',
'T-Score'. Each row corresponds to a column in `data` and are given
in the same order as in `data`.
See Also
--------
scipy.optimize.fmin_l_bfgs_b : The optimization routine used.
"""
logger.debug(
'start: MNL fit with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
numvars = data.shape[1]
numobs = data.shape[0] / numalts
if chosen is None:
chosen = np.ones((numobs, numalts)) # used for latent classes
data = np.transpose(data)
chosen = np.transpose(chosen)
data, chosen = PMAT(data, atype), PMAT(chosen, atype)
if weights is not None:
weights = PMAT(np.transpose(weights), atype)
if beta is None:
beta = np.zeros(numvars)
bounds = [coeffrange] * numvars
with log_start_finish('scipy optimization for MNL fit', logger):
args = (data, chosen, numalts, weights, lcgrad)
bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik,
beta,
args=args,
fprime=None,
factr=10,
approx_grad=False,
bounds=bounds
)
beta = bfgs_result[0]
stderr = mnl_loglik(
beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad)
l0beta = np.zeros(numvars)
l0 = -1 * mnl_loglik(l0beta, *args)[0]
l1 = -1 * mnl_loglik(beta, *args)[0]
log_likelihood = {
'null': float(l0[0][0]),
'convergence': float(l1[0][0]),
'ratio': float((1 - (l1 / l0))[0][0])
}
fit_parameters = pd.DataFrame({
'Coefficient': beta,
'Std. Error': stderr,
'T-Score': beta / stderr})
logger.debug('finish: MNL fit')
return log_likelihood, fit_parameters
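if __name__ == '__main__':
    # Self-contained sketch on synthetic data (not part of urbansim proper):
    # 100 choosers, 5 alternatives each, 3 covariates in "long" form, random choices.
    numobs, numalts, numvars = 100, 5, 3
    rand = np.random.RandomState(0)
    fake_data = rand.randn(numobs * numalts, numvars)
    fake_chosen = np.zeros((numobs, numalts))
    fake_chosen[np.arange(numobs), rand.randint(numalts, size=numobs)] = 1
    ll, fit = mnl_estimate(fake_data, fake_chosen, numalts)
    probs = mnl_simulate(fake_data, fit['Coefficient'].values, numalts)
    print(ll['ratio'], probs.shape)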
| bsd-3-clause |
aje/POT | examples/plot_optim_OTreg.py | 2 | 2940 | # -*- coding: utf-8 -*-
"""
==================================
Regularized OT with generic solver
==================================
Illustrates the use of the generic solver for regularized OT with
user-designed regularization term. It uses Conditional gradient as in [6] and
generalized Conditional Gradient as proposed in [5][7].
[5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for
Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine
Intelligence , vol.PP, no.99, pp.1-1.
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport. SIAM Journal on Imaging Sciences,
7(3), 1853-1882.
[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
conditional gradient: analysis of convergence and applications.
arXiv preprint arXiv:1510.06567.
"""
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
b = ot.datasets.get_1D_gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve EMD with Frobenius norm regularization
# --------------------------------------------
#%% Example with Frobenius norm regularization
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg = 1e-1
Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(3)
ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg')
##############################################################################
# Solve EMD with entropic regularization
# --------------------------------------
#%% Example with entropic regularization
def f(G):
return np.sum(G * np.log(G))
def df(G):
return np.log(G) + 1.
reg = 1e-3
Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg')
##############################################################################
# Solve EMD with Frobenius norm + entropic regularization
# -------------------------------------------------------
#%% Example with Frobenius norm + entropic regularization with gcg
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg1 = 1e-3
reg2 = 1e-1
Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True)
pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg')
pl.show()
| mit |
cheminfo/RDKitjs | old/src/similarityMap_basic_functions.py | 1 | 3270 | # NOTE: this fragment was extracted without its imports; the ones below are what
# the code appears to assume (RDKit + matplotlib). If your RDKit build does not
# bundle rdkit.six, use `from six import iteritems` instead.
import math
import numpy
import numpy as np
from matplotlib import cm
from rdkit.Chem import Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.six import iteritems
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True, imageType=None, fitImage=False, options=None, **kwargs):
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)  # was `drawingOptions`, which is undefined here
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
import numpy
  # matplotlib.mlab is not needed: the local bivariate_normal() above is used instead
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
  Z = bivariate_normal(X, Y, a, a, mol._atomPs[0][0], mol._atomPs[0][1])*weights[0]  # one isotropic Gaussian per atom (univariate, no mixtures)
  for i in range(1, mol.GetNumAtoms()):
    Zp = bivariate_normal(X, Y, a, a, mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
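# Usage sketch (commented out; assumes RDKit is installed -- the weights are
# arbitrary, one per heavy atom of phenol):
# from rdkit import Chem
# mol = Chem.MolFromSmiles('c1ccccc1O')
# fig = GetSimilarityMapFromWeights(mol, [1.0, 0.5, 0.5, 0.5, 0.5, 0.5, -0.5])
# fig.savefig('similarity_map.png', bbox_inches='tight')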
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
mjudsp/Tsallis | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
carrillo/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
JonnyWong16/plexpy | lib/tqdm/_tqdm_gui.py | 4 | 13326 | """
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm_gui import tgrange[, tqdm_gui]
>>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
# import sys
from time import time
from ._utils import _range
# to inherit from the tqdm class
from ._tqdm import tqdm, TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange']
class tqdm_gui(tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
# TODO: @classmethod: write() on GUI?
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
warn('GUI is experimental/alpha', TqdmExperimentalWarning)
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if self.total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if self.total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
if n < 0:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
# Inline due to multiple calls
total = self.total
ax = self.ax
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = self.n / elapsed
# update line data
self.xdata.append(self.n * 100.0 / total
if total else cur_t)
self.ydata.append(y)
self.zdata.append(z)
# Discard old values
if (not total) and elapsed > 66:
self.xdata.popleft()
self.ydata.popleft()
self.zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
self.line1.set_data(self.xdata, self.ydata)
self.line2.set_data(self.xdata, self.zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [self.n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in self.xdata]
self.line1.set_data(t_ago, self.ydata)
self.line2.set_data(t_ago, self.zdata)
ax.set_title(self.format_meter(
self.n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
self.plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
self.disable = True
self._instances.remove(self)
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
def tgrange(*args, **kwargs):
"""
A shortcut for tqdm_gui(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm_gui(_range(*args), **kwargs)
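# Illustrative usage sketch (editorial addition, not part of the library):
# wrapping an arbitrary iterable with the experimental GUI bar. An interactive
# matplotlib backend is assumed to be available; `work_items` and `process`
# are placeholder names.
#
#     from tqdm import tqdm_gui
#
#     for item in tqdm_gui(work_items, total=len(work_items), desc="processing"):
#         process(item)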
| gpl-3.0 |
zhenv5/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
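# Example (editorial sketch, shown as a doctest-style comment): the contingency
# matrix counts how many samples fall into each (true class, predicted cluster)
# pair.
#
#     >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#     array([[1, 1],
#            [0, 2]])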
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of the Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
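# Example (editorial sketch): all three scores are returned in one call, which
# avoids recomputing the entropies and mutual information for each metric.
#
#     >>> homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 2])  # doctest: +ELLIPSIS
#     (1.0, 0.66..., 0.8...)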
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b), to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
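# Example (editorial sketch): identical labelings share all of their
# information, so the MI equals the entropy of the labeling, here log(2)
# for two balanced classes.
#
#     >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#     0.69...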
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-limited by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b), to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
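# Example (editorial sketch): two equally sized classes give the natural-log
# entropy of a fair coin, i.e. log(2).
#
#     >>> entropy([0, 0, 1, 1])  # doctest: +ELLIPSIS
#     0.69...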
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/extension/test_numpy.py | 2 | 12536 | import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
import pandas as pd
from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
import pandas.util.testing as tm
from . import base
@pytest.fixture(params=["float", "object"])
def dtype(request):
return PandasDtype(np.dtype(request.param))
@pytest.fixture
def allow_in_pandas(monkeypatch):
"""
A monkeypatch that tells pandas to let us in.
By default, passing a PandasArray to an index / series / frame
constructor will unbox that PandasArray to an ndarray, and treat
it as a non-EA column. We don't want people using EAs without
reason.
The mechanism for this is a check against ABCPandasArray
in each constructor.
But, for testing, we need to allow them in pandas. So we patch
the _typ of PandasArray, so that we evade the ABCPandasArray
check.
"""
with monkeypatch.context() as m:
m.setattr(PandasArray, "_typ", "extension")
yield
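# Editorial note: monkeypatch.context() reverts every setattr when the block
# exits, so PandasArray only reports itself as an extension type (via "_typ")
# for the duration of the test that requested this fixture.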
@pytest.fixture
def data(allow_in_pandas, dtype):
if dtype.numpy_dtype == "object":
return pd.Series([(i,) for i in range(100)]).array
return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
# For NumPy <1.16, np.array([np.nan, (1,)]) raises
# ValueError: setting an array element with a sequence.
if dtype.numpy_dtype == "object":
if _np_version_under1p16:
raise pytest.skip("Skipping for NumPy <1.16")
return PandasArray(np.array([np.nan, (1,)]))
return PandasArray(np.array([np.nan, 1.0]))
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
def cmp(a, b):
return np.isnan(a) and np.isnan(b)
return cmp
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
if dtype.numpy_dtype == "object":
# Use an empty tuple for first element, then remove,
# to disable np.array's shape inference.
return PandasArray(np.array([(), (2,), (3,), (1,)])[1:])
return PandasArray(np.array([1, 2, 0]))
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
if dtype.numpy_dtype == "object":
return PandasArray(np.array([(1,), np.nan, (0,)]))
return PandasArray(np.array([1, np.nan, 0]))
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
"""Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
if dtype.numpy_dtype == "object":
a, b, c = (1,), (2,), (3,)
else:
a, b, c = np.arange(3)
return PandasArray(np.array([b, b, np.nan, np.nan, a, a, b, c]))
@pytest.fixture
def skip_numpy_object(dtype):
"""
Tests for PandasArray with nested data. Users typically won't create
these objects via `pd.array`, but they can show up through `.array`
on a Series with nested data. Many of the base tests fail, as they aren't
appropriate for nested data.
This fixture allows these tests to be skipped when used as a usefixtures
marker to either an individual test or a test class.
"""
if dtype == "object":
raise pytest.skip("Skipping for object dtype.")
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
class BaseNumPyTests:
pass
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
@skip_nested
def test_astype_str(self, data):
# ValueError: setting an array element with a sequence
super().test_astype_str(data)
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
@pytest.mark.skip(reason="We don't register our dtype")
# We don't want to register. This test should probably be split in two.
def test_from_dtype(self, data):
pass
@skip_nested
def test_array_from_scalars(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_array_from_scalars(data)
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
@pytest.mark.skip(reason="Incorrect expected.")
# we unsurprisingly clash with a NumPy name.
def test_check_dtype(self, data):
pass
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
def test_getitem_scalar(self, data):
# AssertionError
super().test_getitem_scalar(data)
@skip_nested
def test_take_series(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_take_series(data)
@pytest.mark.xfail(reason="astype doesn't recognize data.dtype")
def test_loc_iloc_frame_single_dtype(self, data):
super().test_loc_iloc_frame_single_dtype(data)
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
@skip_nested
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
# ValueError: Names should be list-like for a MultiIndex
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
@skip_nested
def test_array_interface(self, data):
# NumPy array shape inference
super().test_array_interface(data)
class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
@pytest.mark.skip(reason="TODO: remove?")
def test_value_counts(self, all_data, dropna):
pass
@pytest.mark.skip(reason="Incorrect expected")
# We have a bool dtype, so the result is an ExtensionArray
# but expected is not
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
@skip_nested
def test_combine_add(self, data_repeated):
# Not numeric
super().test_combine_add(data_repeated)
@skip_nested
def test_shift_fill_value(self, data):
# np.array shape inference. Shift implementation fails.
super().test_shift_fill_value(data)
@skip_nested
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
# Fails creating expected
super().test_unique(data, box, method)
@skip_nested
def test_fillna_copy_frame(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_frame(data_missing)
@skip_nested
def test_fillna_copy_series(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_series(data_missing)
@skip_nested
def test_hash_pandas_object_works(self, data, as_frame):
# ndarray of tuples not hashable
super().test_hash_pandas_object_works(data, as_frame)
@skip_nested
def test_searchsorted(self, data_for_sorting, as_series):
# Test setup fails.
super().test_searchsorted(data_for_sorting, as_series)
@skip_nested
def test_where_series(self, data, na_value, as_frame):
# Test setup fails.
super().test_where_series(data, na_value, as_frame)
@skip_nested
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
# Fails creating expected
super().test_repeat(data, repeats, as_series, use_numpy)
@skip_nested
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
series_scalar_exc = None
frame_scalar_exc = None
series_array_exc = None
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data, exc=None)
@pytest.mark.skip("We implement ops")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
@skip_nested
class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
# avoid coercing int -> float. Just cast to the actual numpy type.
expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@skip_nested
class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
pass
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
def test_fillna_scalar(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_scalar(data_missing)
@skip_nested
def test_fillna_series_method(self, data_missing, fillna_method):
# Non-scalar "scalar" values.
super().test_fillna_series_method(data_missing, fillna_method)
@skip_nested
def test_fillna_series(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_series(data_missing)
@skip_nested
def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip("Incorrect parent test")
# not actually a mixed concat, since we concat int and int.
def test_concat_mixed_dtypes(self, data):
super().test_concat_mixed_dtypes(data)
@skip_nested
def test_merge(self, data, na_value):
# Fails creating expected
super().test_merge(data, na_value)
@skip_nested
def test_merge_on_extension_array(self, data):
# Fails creating expected
super().test_merge_on_extension_array(data)
@skip_nested
def test_merge_on_extension_array_duplicates(self, data):
# Fails creating expected
super().test_merge_on_extension_array_duplicates(data)
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
@skip_nested
def test_setitem_scalar_series(self, data, box_in_series):
# AssertionError
super().test_setitem_scalar_series(data, box_in_series)
@skip_nested
def test_setitem_sequence(self, data, box_in_series):
# ValueError: shape mismatch: value array of shape (2,1) could not
# be broadcast to indexing result of shape (2,)
super().test_setitem_sequence(data, box_in_series)
@skip_nested
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
# ValueError: PandasArray must be 1-dimensional.
super().test_setitem_sequence_mismatched_length_raises(data, as_array)
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_sequence_broadcasts(data, box_in_series)
@skip_nested
def test_setitem_loc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_loc_scalar_mixed(data)
@skip_nested
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@skip_nested
def test_setitem_iloc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_iloc_scalar_mixed(data)
@skip_nested
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@skip_nested
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_mask_broadcast(data, setter)
@skip_nested
def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
pass
| apache-2.0 |
ati-ozgur/KDD99ReviewArticle | HelperCodes/create_table_JournalAndArticleCounts.py | 1 | 1930 | import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
#df_journal = df.groupby('journal')["ID"]
dfJournalList = df.groupby(['journal'])['ID'].count().sort_values(ascending=False)  # Series.order() no longer exists in pandas
isOdd = (dfJournalList.size % 2 == 1)
if (isOdd):
table_row_length = dfJournalList.size / 2 +1
else:
table_row_length = dfJournalList.size / 2
table_content_inside=""
for index in range(table_row_length):
journal_name_1column = dfJournalList.index[index]
journal_count_1column = dfJournalList[index]
second_column_index = index + table_row_length
if(second_column_index < dfJournalList.size):
journal_name_2column = dfJournalList.index[second_column_index]
journal_count_2column = dfJournalList[second_column_index]
else:
journal_name_2column = ""
journal_count_2column = ""
line = "{journal_name_1column} & {journal_count_1column} & {journal_name_2column} & {journal_count_2column} \\\\ \n".format(
journal_name_1column = journal_name_1column
,journal_count_1column = journal_count_1column
,journal_name_2column = journal_name_2column
,journal_count_2column = journal_count_2column
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table*}[!ht]
\\caption{ \\textbf{Journals and Article Counts} }
\\label{table-JournalAndArticleCounts}
\\centering
\\begin{adjustbox}{max width=\\textwidth}
\\normalsize
\\begin{tabular}{llll}
\\toprule
Journal Name & Article Count & Journal Name & Article Count \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{adjustbox}
\\end{table*}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
filename = "../latex/table-JournalAndArticleCounts.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
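# Example of a single emitted table row (journal names and counts below are
# illustrative placeholders, not taken from the bibliography):
#
#   Expert Systems with Applications & 11 & Information Sciences & 7 \\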
| mit |
samuel1208/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/tslibs/test_parsing.py | 2 | 5799 | # -*- coding: utf-8 -*-
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
from datetime import datetime
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
from pandas.util import testing as tm
def test_parse_time_string():
(date, parsed, reso) = parse_time_string("4Q1984")
(date_lower, parsed_lower, reso_lower) = parse_time_string("4q1984")
assert date == date_lower
assert reso == reso_lower
assert parsed == parsed_lower
@pytest.mark.parametrize("dashed,normal", [
("1988-Q2", "1988Q2"),
("2Q-1988", "2Q1988")
])
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
(date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
(date, parsed, reso) = parse_time_string(normal)
assert date_dash == date
assert parsed_dash == parsed
assert reso_dash == reso
@pytest.mark.parametrize("dashed", [
"-2Q1992", "2-Q1992", "4-4Q1992"
])
def test_parse_time_quarter_with_dash_error(dashed):
msg = ("Unknown datetime string format, "
"unable to parse: {dashed}".format(dashed=dashed))
with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@pytest.mark.parametrize("date_string,expected", [
("123.1234", False),
("-50000", False),
("999", False),
("m", False),
("T", False),
("Mon Sep 16, 2013", True),
("2012-01-01", True),
("01/01/2012", True),
("01012012", True),
("0101", True),
("1-1", True)
])
def test_does_not_convert_mixed_integer(date_string, expected):
assert parsing._does_string_look_like_datetime(date_string) is expected
@pytest.mark.parametrize("date_str,kwargs,msg", [
("2013Q5", dict(),
("Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5")),
# see gh-5418
("2013Q1", dict(freq="INVLD-L-DEC-SAT"),
("Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"))
])
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
with pytest.raises(parsing.DateParseError, match=msg):
parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize("date_str,freq,expected", [
("2013Q2", None, datetime(2013, 4, 1)),
("2013Q2", "A-APR", datetime(2012, 8, 1)),
("2013-Q2", "A-DEC", datetime(2013, 4, 1))
])
def test_parsers_quarterly_with_freq(date_str, freq, expected):
result, _, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@pytest.mark.parametrize("date_str", [
"2Q 2005", "2Q-200A", "2Q-200",
"22Q2005", "2Q200.", "6Q-20"
])
def test_parsers_quarter_invalid(date_str):
if date_str == "6Q-20":
msg = ("Incorrect quarterly string is given, quarter "
"must be between 1 and 4: {date_str}".format(date_str=date_str))
else:
msg = ("Unknown datetime string format, unable "
"to parse: {date_str}".format(date_str=date_str))
with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
@pytest.mark.parametrize("date_str,expected", [
("201101", datetime(2011, 1, 1, 0, 0)),
("200005", datetime(2000, 5, 1, 0, 0))
])
def test_parsers_month_freq(date_str, expected):
result, _, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize("string,fmt", [
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f")
])
def test_guess_datetime_format_with_parseable_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("dayfirst,expected", [
(True, "%d/%m/%Y"),
(False, "%m/%d/%Y")
])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
ambiguous_string = "01/01/2011"
result = parsing._guess_datetime_format(ambiguous_string,
dayfirst=dayfirst)
assert result == expected
@td.skip_if_has_locale
@pytest.mark.parametrize("string,fmt", [
("30/Dec/2011", "%d/%b/%Y"),
("30/December/2011", "%d/%B/%Y"),
("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S")
])
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("invalid_dt", [
"2013", "01/2013", "12:00:00", "1/1/1/1",
"this_is_not_a_datetime", "51a", 9,
datetime(2011, 1, 1)
])
def test_guess_datetime_format_invalid_inputs(invalid_dt):
# A datetime string must include a year, month and a day for it to be
# guessable, in addition to being a string that looks like a datetime.
assert parsing._guess_datetime_format(invalid_dt) is None
@pytest.mark.parametrize("string,fmt", [
("2011-1-1", "%Y-%m-%d"),
("1/1/2011", "%m/%d/%Y"),
("30-1-2011", "%d-%m-%Y"),
("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S")
])
def test_guess_datetime_format_no_padding(string, fmt):
# see gh-11142
result = parsing._guess_datetime_format(string)
assert result == fmt
def test_try_parse_dates():
arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
skdaccess/skdaccess | skdaccess/engineering/la/generic/stream.py | 2 | 3472 | # The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Standard library imports
from collections import OrderedDict
from io import StringIO
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party imports
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import pandas as pd
class DataFetcher(DataFetcherStream):
"""
Class for handling data requests to data.lacity.org
"""
def __init__(self, endpoint, parameters, label, verbose=False, app_token = None, **pandas_kwargs):
"""
Initialize Data Fetcher for accessing data.lacity.org
@param endpoint: Data endpoint string
        @param parameters: Parameters to use when retrieving data
@param label: Label of pandas dataframe
@param verbose: Print out extra information
@param app_token: Application token to use to avoid throttling issues
        @param pandas_kwargs: Any additional keyword arguments are passed to pandas.read_csv
"""
self.base_url = 'https://data.lacity.org/resource/'
self.base_url_and_endpoint = self.base_url + endpoint + '.csv?'
self.parameters = parameters
self.label = label
self.app_token = app_token
self.pandas_kwargs = pandas_kwargs
if '$$app_token' in parameters:
            raise RuntimeError("Use app_token option in constructor instead of manually " +
                               "adding it into the parameters")
        if app_token is not None:
self.parameters['$$app_token'] = app_token
super(DataFetcher, self).__init__([], verbose)
def output(self):
"""
Retrieve data from data.lacity.org
        @return Table wrapper containing the specified data
"""
data_dict = OrderedDict()
url_query = self.base_url_and_endpoint + urlencode(self.parameters)
with urlopen(url_query) as remote_resource:
raw_string = remote_resource.read().decode()
string_data = StringIO(raw_string)
data_dict[self.label] = pd.read_csv(string_data, **self.pandas_kwargs)
return TableWrapper(data_dict)
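# Hedged usage sketch (illustrative only): the endpoint id and SoQL parameters
# below are hypothetical placeholders, not values from the original module.
def _example_data_fetcher():
    fetcher = DataFetcher(endpoint='abcd-1234',
                          parameters={'$limit': 100},
                          label='la_example')
    # output() returns a TableWrapper whose single pandas DataFrame is keyed
    # by the label given above.
    return fetcher.output()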
| mit |
robket/BioScripts | alignment.py | 1 | 9138 | import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import toimage
from collections import defaultdict, Counter
from types import SimpleNamespace
from PIL import ImageDraw
# This color table is sourced from https://github.com/trident01/BioExt-1/blob/master/AlignmentImage.java
LIGHT_GRAY = 196
FIXED_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"A": [255, 0, 0],
"C": [255, 255, 0],
"T": [0, 255, 0],
"G": [190, 0, 95],
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
GRAY_GAPS_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
BLACK_COLOR_TABLE = defaultdict(lambda: [0, 0, 0])
class Alignment:
def __init__(self, query_start, query_seq, target_start, target_seq, sequence_name, target_label, expected_errors):
self.name = sequence_name
self.target_label = target_label
self.expected_errors = expected_errors
self.query_start = int(query_start) - 1
self.query_seq = query_seq
query_gap_count = query_seq.count("-")
self.query_length = len(query_seq) - query_gap_count
self.target_start = int(target_start) - 1
self.target_seq = target_seq
target_gap_count = target_seq.count("-")
self.target_length = len(target_seq) - target_gap_count
self.no_gap_length = len(target_seq) - target_gap_count - query_gap_count
if len(target_seq) != len(query_seq):
raise ValueError("Length of target sequence not equal to length of query sequence")
def alignment_iterator(alignment, ignore_case=True, include_gaps=False):
target_index = 0
target_offset = 0
query_index = 0
while target_index < len(alignment.target_seq) and query_index < len(alignment.query_seq):
if alignment.target_seq[target_index] == "-": # If it is an insertion
target_offset += 1
elif alignment.query_seq[query_index] != "-" or include_gaps:
reference_index = alignment.target_start + target_index - target_offset
query_nucleotide = alignment.query_seq[query_index].upper() if ignore_case else alignment.query_seq[query_index]
target_nucleotide = alignment.target_seq[target_index].upper() if ignore_case else alignment.target_seq[target_index]
yield SimpleNamespace(reference_index=reference_index,
target_nucleotide=target_nucleotide,
query_nucleotide=query_nucleotide)
target_index += 1
query_index += 1
def count_mismatches(alignment, ignore_case=True):
mismatch_count = 0
for position in alignment_iterator(alignment, ignore_case):
if position.target_nucleotide != position.query_nucleotide:
mismatch_count += 1
return mismatch_count
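# Hedged illustration (not part of the original script): a tiny made-up
# alignment with one query gap and one substitution. The gap column is skipped
# by alignment_iterator (include_gaps defaults to False), so count_mismatches
# reports a single mismatch (T vs A in the last column).
def _example_count_mismatches():
  aln = Alignment(query_start=1, query_seq="ACG-A", target_start=1,
                  target_seq="ACGAT", sequence_name="read1",
                  target_label="ref", expected_errors=0.5)
  return count_mismatches(aln)  # -> 1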
def save_expected_error_rates(alignments, output_file):
expected_error_rates = [a.expected_errors / a.query_length for a in alignments]
plt.cla()
plt.hist(expected_error_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Expected Error Rate')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Expected Error Rates')
plt.grid(True)
plt.savefig(output_file)
def save_mismatch_rates(alignments, output_file, ignore_case=True):
mismatch_rates = [count_mismatches(a, ignore_case) / a.no_gap_length for a in alignments]
plt.cla()
plt.hist(mismatch_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Rate of mismatches')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Mismatch Rates')
plt.grid(True)
plt.savefig(output_file)
def gap_distribution(sequence):
dist = Counter()
count_length = 0
for char in sequence:
if char == "-":
count_length += 1
elif count_length > 0:
dist[count_length] += 1
count_length = 0
if count_length > 0:
dist[count_length] += 1
return dist
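# Hedged illustration: the made-up gapped sequence below has one gap run of
# length 2, one of length 3 and a trailing run of length 1.
def _example_gap_distribution():
  assert gap_distribution("AA--A---A-") == Counter({1: 1, 2: 1, 3: 1})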
def save_insertion_or_deletion_dist(alignments, output_file, insertion_not_deletion=True):
size_counter = Counter()
for a in alignments:
size_counter += gap_distribution(a.target_seq if insertion_not_deletion else a.query_seq)
sizes, counts = zip(*size_counter.items())
number_of_bins = max(sizes)
number_of_bins = round(number_of_bins / np.ceil(number_of_bins/50))
plt.cla()
n, bins, patches = plt.hist(sizes, number_of_bins, weights=counts, log=True)
plt.ylim(ymin=0.9)
plt.xlim(xmin=1)
plt.xlabel('Size of insertion' if insertion_not_deletion else 'Size of deletion')
plt.ylabel('Count')
plt.tick_params(which='both', direction='out')
plt.title('Insertion size distribution' if insertion_not_deletion else 'Deletion size distribution')
plt.grid(True)
plt.savefig(output_file)
# Get nucleotide distribution
def nucleotide_distribution(alignments, ignore_case=False, include_gaps=True):
max_index = 0
distribution = defaultdict(Counter)
for a in alignments:
for position in alignment_iterator(a, ignore_case, include_gaps):
distribution[position.reference_index][position.query_nucleotide] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [distribution[i] for i in range(max_index)]
def save_nucleotide_map(alignments, output, ignore_case=True, include_gaps=True):
nucleotides = nucleotide_distribution(alignments, ignore_case, include_gaps)
width = len(nucleotides)
keys = set()
for distribution_at_base in nucleotides:
keys.update(set(distribution_at_base.keys()))
keys = sorted(list(keys), key=lambda x: "ZZZ" if x == "-" else x)
nucleotide_count_array = np.zeros((len(keys), width), dtype=np.uint32)
for i, key in enumerate(keys):
for j, counts in enumerate(nucleotides):
nucleotide_count_array[i, j] = counts[key]
cum_sum = nucleotide_count_array.cumsum(axis=0)
height = cum_sum[-1,].max()
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
for i, key in enumerate(keys):
start = 0 if i == 0 else cum_sum[i - 1, x]
end = cum_sum[i, x]
data_matrix[start:end, x, 0:3] = FIXED_COLOR_TABLE[key]
img = to_image(data_matrix[::-1,], ruler_underneath=True)
img.save(output)
# Get coverage map
def coverage_map(alignments, include_gaps=False):
max_index = 0
coverage = Counter()
for a in alignments:
for position in alignment_iterator(a, True, include_gaps):
coverage[position.reference_index] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [coverage[i] for i in range(max_index)]
def save_coverage_map(alignments, output):
coverage_with_gaps = coverage_map(alignments, True)
coverage_without_gaps = coverage_map(alignments, False)
width = len(coverage_with_gaps)
height = max(coverage_with_gaps)
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
y1 = coverage_without_gaps[x]
y2 = coverage_with_gaps[x]
data_matrix[0:y1, x, 0:3] = 0
data_matrix[y1:y2, x, 0:3] = 127
img = to_image(data_matrix[::-1], add_ruler=True, ruler_underneath=True)
img.save(output)
def save_alignment_map(coords, output_file, sort_key=sum, crop=True, no_ruler=False):
if crop:
minimum = min(coords, key=lambda x: x[0])[0]
else:
minimum = 0
maximum = max(coords, key=lambda x: x[1])[1]
dimensions = (len(coords), maximum - minimum)
data_matrix = np.full((dimensions[0], dimensions[1] + 1), 255, dtype=np.uint8)
if sort_key is not None:
coords.sort(key=sort_key)
is_multiple_alignment = len(coords[0]) > 3 and type(coords[0][3]) == list
# Greyscale over the bounds (or black if not multiple alignment)
for i, coord in enumerate(coords):
start = coord[0]
end = coord[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = LIGHT_GRAY if is_multiple_alignment else 0
# Black over the subalignments, if any
if is_multiple_alignment:
for i, coord in enumerate(coords):
for subalignment in coord[3]:
start = subalignment[0]
end = subalignment[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = 0
img = to_image(data_matrix, not no_ruler, offset=minimum)
img.save(output_file)
def to_image(data_matrix, add_ruler=True, ruler_underneath = False, offset=1):
maximum = offset + data_matrix.shape[1]
if add_ruler:
shape = list(data_matrix.shape)
shape[0] = 12 # Number of rows
ruler_matrix = np.full(shape, 255, dtype=data_matrix.dtype)
# tens ticks
ruler_matrix[0 if ruler_underneath else 11, 10-(offset%10)::10] = 0
# 50s ticks
ruler_matrix[1 if ruler_underneath else 10, 50-(offset%50)::50] = 0
if ruler_underneath:
img = toimage(np.vstack([data_matrix, ruler_matrix]))
else:
img = toimage(np.vstack([ruler_matrix, data_matrix]))
draw = ImageDraw.Draw(img)
# Hundreds words
for i in range((offset//100) + 1, maximum // 100 + 1):
centering = (6 * (int(np.log10(i)) + 3) - 1) // 2
draw.text((i * 100 - centering - offset, (data_matrix.shape[0] + 2) if ruler_underneath else 0), str(i) + "00", fill="black")
else:
img = toimage(data_matrix)
return img
| mit |
XCage15/privacyidea | privacyidea/lib/stats.py | 3 | 5464 | # -*- coding: utf-8 -*-
#
# 2015-07-16 Initial writeup
# (c) Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module reads audit data and can create statistics from
audit data using pandas.
This module is tested in tests/test_lib_stats.py
"""
import logging
from privacyidea.lib.log import log_with
import datetime
import StringIO
log = logging.getLogger(__name__)
try:
import matplotlib
MATPLOT_READY = True
matplotlib.style.use('ggplot')
matplotlib.use('Agg')
except Exception as exx:
MATPLOT_READY = False
log.warning("If you want to see statistics you need to install python "
"matplotlib.")
customcmap = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
@log_with(log)
def get_statistics(auditobject, start_time=datetime.datetime.now()
-datetime.timedelta(days=7),
end_time=datetime.datetime.now()):
"""
Create audit statistics and return a JSON object
The auditobject is passed from the upper level, usually from the REST API
as g.auditobject.
:param auditobject: The audit object
:type auditobject: Audit Object as defined in auditmodules.base.Audit
:return: JSON
"""
result = {}
df = auditobject.get_dataframe(start_time=start_time, end_time=end_time)
# authentication successful/fail per user or serial
for key in ["user", "serial"]:
result["validate_%s_plot" % key] = _get_success_fail(df, key)
# get simple usage
for key in ["serial", "action"]:
result["%s_plot" % key] = _get_number_of(df, key)
# failed authentication requests
for key in ["user", "serial"]:
result["validate_failed_%s_plot" % key] = _get_fail(df, key)
result["admin_plot"] = _get_number_of(df, "action", nums=20)
return result
def _get_success_fail(df, key):
try:
output = StringIO.StringIO()
series = df[df.action.isin(["POST /validate/check",
"GET /validate/check"])].groupby([key,
'success']).size().unstack()
fig = series.plot(kind="bar", stacked=True,
legend=True,
title="Authentications",
grid=True,
color=customcmap).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
except Exception as exx:
log.info(exx)
image_uri = "%s" % exx
return image_uri
def _get_fail(df, key):
try:
output = StringIO.StringIO()
series = df[(df.success==0)
& (df.action.isin(["POST /validate/check",
"GET /validate/check"]))][
key].value_counts()[:5]
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1,1,1)
fig = series.plot(ax=ax, kind="bar",
colormap="Reds",
stacked=False,
legend=False,
grid=True,
title="Failed Authentications").get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
except Exception as exx:
log.info(exx)
image_uri = "%s" % exx
return image_uri
def _get_number_of(df, key, nums=5):
"""
return a data url image with a single keyed value.
    It plots the "nums" most frequent values of the "key" column in the dataframe.
:param df: The DataFrame
:type df: Pandas DataFrame
:param key: The key, which should be plotted.
    :param nums: how many of the most frequent values should be plotted
:return: A data url
"""
output = StringIO.StringIO()
output.truncate(0)
try:
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1, 1, 1)
series = df[key].value_counts()[:nums]
fig = series.plot(ax=ax, kind="bar", colormap="Blues",
legend=False,
stacked=False,
title="Numbers of %s" % key,
grid=True).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
except Exception as exx:
log.info(exx)
image_uri = "No data"
return image_uri
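# Hedged usage sketch: the data URIs built above are meant to be embedded
# directly in an <img> tag. The ``stats`` argument is assumed to be the
# dictionary returned by get_statistics().
def _example_embed_plot(stats):
    return '<img src="%s" />' % stats.get("serial_plot", "")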
| agpl-3.0 |
phobson/statsmodels | statsmodels/regression/linear_model.py | 1 | 97462 | # TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T
return sigma, cholsigmainv
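# Hedged illustration (not part of the original module) of the three accepted
# ``sigma`` shapes; the numbers are arbitrary.
def _example_get_sigma():
    s0, c0 = _get_sigma(2.0, 3)               # scalar -> length-3 vector, c0 = 1/sqrt(s0)
    s1, c1 = _get_sigma([1.0, 4.0, 9.0], 3)   # 1d     -> elementwise 1/sqrt(sigma)
    s2, c2 = _get_sigma(np.eye(3), 3)         # 2d     -> cholesky(pinv(sigma)).T
    return (s0, c0), (s1, c1), (s2, c2)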
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom, defined as the rank of the regressor
matrix minus 1 if a constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom, defined as the number of observations
minus the rank of the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, X):
raise NotImplementedError("Subclasses should implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance estimators
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
Returns
-------
A RegressionResults class instance.
See Also
---------
regression.linear_model.RegressionResults
regression.linear_model.RegressionResults.get_robustcov_results
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if ((not hasattr(self, 'pinv_wexog')) or
(not hasattr(self, 'normalized_cov_params')) or
(not hasattr(self, 'rank'))):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np_matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if ((not hasattr(self, 'exog_Q')) or
(not hasattr(self, 'exog_R')) or
(not hasattr(self, 'normalized_cov_params')) or
(getattr(self, 'rank', None) is None)):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np_matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit)
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
#JP: this doesn't look correct for GLMAR
#SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Returns a random number generator for the predictive distribution.
Parameters
----------
params : array-like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array-like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and
'scale' as arguments and return a random number generator
implementing an `rvs` method for simulating random values.
Defaults to Gaussian.
Returns a frozen random number generator object with mean and
variance determined by the fitted linear model. Use the
``rvs`` method to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen
class GLS(RegressionModel):
__doc__ = """
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
**Attributes**
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsimgainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
        p - 1, where p is the number of regressors including the intercept.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
    >>> print(gls_results.summary())
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
#store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
-----------
X : array-like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
"""
Returns the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array-like
The parameter estimates
Returns
-------
loglike : float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)
Y and Y-hat are whitened.
"""
#TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim==2:
det = np.linalg.slogdet(self.sigma)
llf -= .5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
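# Hedged illustration (not part of the original module): with a 1d ``sigma``
# GLS reduces to WLS with weights = 1/sigma, as the GLS docstring notes.
# The toy data are arbitrary.
def _example_gls_diagonal_equals_wls():
    y = np.array([1., 3., 4., 5., 2., 3., 4.])
    x = add_constant(np.arange(1., 8.))
    sigma = np.linspace(0.5, 2.0, 7)
    gls_params = GLS(y, x, sigma=sigma).fit().params
    wls_params = WLS(y, x, weights=1.0 / sigma).fit().params
    return np.allclose(gls_params, wls_params)  # -> True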
class WLS(RegressionModel):
__doc__ = """
A regression model with diagonal but non-identity covariance structure.
The weights are presumed to be (proportional to) the inverse of the
variance of the observations. That is, if the variables are to be
transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : array
The stored weights supplied as an argument.
See regression.GLS
Examples
---------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652 , 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size != nobs and weights.shape[0] != nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, X):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights)
Parameters
----------
X : array-like
Data to be whitened
Returns
-------
sqrt(weights)*X
"""
#print(self.weights.var()))
X = np.asarray(X)
if X.ndim == 1:
return X * np.sqrt(self.weights)
elif X.ndim == 2:
return np.sqrt(self.weights)[:, None]*X
def loglike(self, params):
"""
Returns the value of the gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array-like
The parameter estimates.
Returns
-------
llf : float
The value of the log-likelihood function for a WLS Model.
Notes
--------
.. math:: -\\frac{n}{2}\\log\\left(Y-\\hat{Y}\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}log\\left(\\left|W\\right|\\right)
where :math:`W` is a diagonal matrix
"""
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog,params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
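# Hedged illustration (not part of the original module): WLS estimates equal
# OLS estimates on data pre-multiplied by sqrt(weights), which is exactly what
# ``WLS.whiten`` does. The toy data are arbitrary.
def _example_wls_equals_whitened_ols():
    y = np.array([1., 3., 4., 5., 2., 3., 4.])
    x = add_constant(np.arange(1., 8.))
    w = np.arange(1., 8.)
    wls_params = WLS(y, x, weights=w).fit().params
    ols_params = OLS(np.sqrt(w) * y, np.sqrt(w)[:, None] * x).fit().params
    return np.allclose(wls_params, ols_params)  # -> True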
class OLS(WLS):
__doc__ = """
A simple ordinary least squares model.
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
GLS
Examples
--------
>>> import numpy as np
>>>
>>> import statsmodels.api as sm
>>>
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>>
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
    >>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
Notes
-----
No constant is added by the model unless you are using formulas.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
#TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array-like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf
def whiten(self, Y):
"""
OLS model whitener does nothing: returns Y.
"""
return Y
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array-like
The parameter vector at which the score function is
computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale
def _setup_score_hess(self):
y = self.wendog
if hasattr(self, 'offset'):
y = y - self.offset
self._wendog_xprod = np.sum(y * y)
self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
self._wexog_x_wendog = np.dot(self.wexog.T, y)
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array-like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, profile_scale=False,
refit=False, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : string
Only the 'elastic_net' approach is currently implemented.
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array-like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
Returns
-------
An array of coefficients, or a RegressionResults object of the
same type returned by ``fit``.
Notes
-----
The elastic net approach closely follows that implemented in
the glmnet package in R. The penalty is a combination of L1
and L2 penalties.
        The function that is minimized is:

        .. math::

            0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
References
----------
Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
"""
from statsmodels.base.elastic_net import fit_elasticnet
# In the future we could add support for other penalties, e.g. SCAD.
if method != "elastic_net":
            raise ValueError("method for fit_regularized must be elastic_net")
# Set default parameters.
defaults = {"maxiter" : 50, "L1_wt" : 1, "cnvrg_tol" : 1e-10,
"zero_tol" : 1e-10}
defaults.update(kwargs)
# If a scale parameter is passed in, the non-profile
# likelihood (residual sum of squares divided by -2) is used,
# otherwise the profile likelihood is used.
if profile_scale:
loglike_kwds = {}
score_kwds = {}
hess_kwds = {}
else:
loglike_kwds = {"scale": 1}
score_kwds = {"scale": 1}
hess_kwds = {"scale": 1}
return fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
loglike_kwds=loglike_kwds,
score_kwds=score_kwds,
hess_kwds=hess_kwds,
refit=refit,
**defaults)
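# Hedged usage sketch for the elastic net fit above; the toy data, seed and
# penalty weight are arbitrary, and L1_wt=1.0 selects a pure L1 (lasso) penalty.
def _example_fit_regularized():
    rng = np.random.RandomState(0)
    x = add_constant(rng.standard_normal((50, 3)))
    y = x[:, 1] + 0.1 * rng.standard_normal(50)
    # Per the docstring above, the return value is either a coefficient array
    # or a results object of the same type returned by ``fit``.
    return OLS(y, x).fit_regularized(alpha=0.01, L1_wt=1.0)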
class GLSAR(GLS):
__doc__ = """
A regression model with an AR(p) covariance structure.
%(params)s
rho : int
Order of the autoregressive covariance
%(extra_params)s
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
Notes
-----
GLSAR is considered to be experimental.
The linear autoregressive process of order p--AR(p)--is defined as:
TODO
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
#this looks strange, interpreting rho as order if it is int
if isinstance(rho, np.int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0,1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
#JP this looks wrong, should be a regression on constant
#results for rho estimate now identical to yule-walker on y
#super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),
missing=missing, **kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors, AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : integer, optional
the number of iterations
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if
max(abs(last - current) / abs(last)) < rtol
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho':[self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we didn't update rho
results = self.fit(history=history, **kwds)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results
def whiten(self, X):
"""
Whiten a series of columns according to an AR(p)
covariance structure. This drops initial p observations.
Parameters
----------
X : array-like
The data to be whitened,
Returns
-------
whitened array
"""
#TODO: notation for AR process
X = np.asarray(X, np.float64)
_X = X.copy()
#the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
return _X[self.order:]
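# Hedged illustration (not part of the original module) of GLSAR.whiten: with
# rho = [0.5] the whitened series is x[t] - 0.5*x[t-1] and the first ``order``
# observations are dropped. The numbers are arbitrary.
def _example_glsar_whiten():
    endog = np.array([1., 2., 4., 7.])
    mod = GLSAR(endog, rho=np.array([0.5]))
    return mod.whiten(endog)  # -> array([1.5, 3. , 5. ])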
def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
"""
Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
Unbiased or maximum-likelihood estimator (mle)
See, for example:
http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
Parameters
----------
X : array-like
1d array
order : integer, optional
The order of the autoregressive process. Default is 1.
method : string, optional
Method can be "unbiased" or "mle" and this determines denominator in
estimate of autocorrelation function (ACF) at lag k. If "mle", the
denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
The default is unbiased.
df : integer, optional
Specifies the degrees of freedom. If `df` is supplied, then it is assumed
the X has `df` degrees of freedom rather than `n`. Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
demean : bool
True, the mean is subtracted from `X` before estimation.
Returns
-------
rho
The autoregressive coefficients
sigma
TODO
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
    >>> rho, sigma = sm.regression.yule_walker(data.endog,
    ...                                        order=4, method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
#TODO: define R better, look back at notes and technical notes on YW.
#First link here is useful
#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
X = np.array(X, dtype=np.float64)
if demean:
X -= X.mean() # automatically demean's X
n = df or X.shape[0]
if method == "unbiased": # this is df_resid ie., n - p
denom = lambda k: n - k
else:
denom = lambda k: n
if X.ndim > 1 and X.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (X**2).sum() / denom(0)
for k in range(1,order+1):
r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv==True:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
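# Hedged illustration (not part of the original module): recovering the
# coefficient of a simulated AR(1) process with yule_walker. The seed, sample
# size and true coefficient (0.6) are arbitrary.
def _example_yule_walker_ar1():
    rng = np.random.RandomState(12345)
    x = np.zeros(1000)
    for t in range(1, 1000):
        x[t] = 0.6 * x[t - 1] + rng.standard_normal()
    rho, sigma = yule_walker(x, order=1, method="mle")
    return rho, sigma  # rho is close to [0.6], sigma close to 1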
class RegressionResults(base.LikelihoodModelResults):
"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Returns
-------
**Attributes**
aic
Aikake's information criteria. For a model with a constant
:math:`-2llf + 2(df_model + 1)`. For a model without a constant
:math:`-2llf + 2(df_model)`.
bic
Bayes' information criteria For a model with a constant
:math:`-2llf + \log(n)(df_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df_model)`
bse
The standard errors of the parameter estimates.
pinv_wexog
See specific model class docstring
centered_tss
The total (weighted) sum of squares centered about the mean.
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
        Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
ess
Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is
no constant, the uncentered total sum of squares is used.
fvalue
F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals.
f_pvalue
p-value of the F-statistic
fittedvalues
        The predicted values for the original (unwhitened) design.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)
where e_i = resid[i]
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0)
        HC1_se is a cached property.
        When HC1_se or cov_HC1 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
        HC2_se is a cached property.
        When HC2_se or cov_HC2 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
        HC3_se is a cached property.
        When HC3_se or cov_HC3 is called the RegressionResults instance will
        then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance that called fit() or results.
mse_model
Mean squared error the model. This is the explained sum of squares
divided by the model degrees of freedom.
mse_resid
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
mse_total
Total mean squared error. Defined as the uncentered total sum of
squares divided by n the number of observations.
nobs
Number of observations n.
normalized_cov_params
See specific model class docstring
params
The linear coefficients that minimize the least squares criterion. This
is usually called Beta for the classical linear model.
pvalues
The two-tailed p values for the t-stats of the params.
resid
The residuals of the model.
resid_pearson
`wresid` normalized to have unit variance.
rsquared
R-squared of a model with an intercept. This is defined here as
1 - `ssr`/`centered_tss` if the constant is included in the model and
1 - `ssr`/`uncentered_tss` if the constant is omitted.
rsquared_adj
Adjusted R-squared. This is defined here as
1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included
and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.
scale
A scale factor for the covariance matrix.
Default value is ssr/(n-p). Note that the square root of `scale` is
often called the standard error of the regression.
ssr
Sum of squared (whitened) residuals.
uncentered_tss
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
wresid
The residuals of the transformed/whitened regressand and regressor(s)
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
super(RegressionResults, self).__init__(model, params,
normalized_cov_params,
scale)
self._cache = resettable_cache()
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
self.use_t = True # TODO: class default
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
for key in kwargs:
setattr(self, key, kwargs[key])
def __str__(self):
        return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
return self.model.wendog - self.model.predict(self.params,
self.model.wexog)
@cache_readonly
def resid(self):
return self.model.endog - self.model.predict(self.params,
self.model.exog)
#TODO: fix writable example
@cache_writable()
def scale(self):
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
model = self.model
weights = getattr(model, 'weights', None)
if weights is not None:
return np.sum(weights*(model.endog - np.average(model.endog,
weights=weights))**2)
else: # this is probably broken for GLS
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)
@cache_readonly
def mse_model(self):
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
            # TODO: What if model includes implicit constant, e.g. all dummies but no constant regressor?
            # TODO: Restate as LM test by projecting or orthogonalizing to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
#TODO: make these properties reset bse
def _HCCM(self, scale):
H = np.dot(self.model.pinv_wexog,
scale[:,None]*self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
See statsmodels.RegressionResults
"""
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale=(self.wresid/(1-h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
An array wresid/sqrt(scale)
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# don't divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller model are spanned
by the regressors in the larger model and the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:,None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of regressors
return np.allclose(score_l2,0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""Use Lagrange Multiplier test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether to demean the scores based on the residuals
from the restricted model. If True, the covariance of the scores
is used and the LM test is identical to the large sample version
of the LR test.
Returns
-------
lm_value : float
test statistic, chi2 distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
TODO: explain the LM test in more detail
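Examples
--------
A minimal, illustrative sketch on simulated data; the variable names
below are made up for demonstration and are not part of the API:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(0)
>>> x = np.random.randn(100, 2)
>>> y = x[:, 0] + np.random.randn(100)
>>> res_full = sm.OLS(y, sm.add_constant(x)).fit()
>>> res_restr = sm.OLS(y, sm.add_constant(x[:, :1])).fit()
>>> lm_value, p_value, df_diff = res_full.compare_lm_test(res_restr)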
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:,None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:,None]
demean = False
if demean:
scores = scores - scores.mean(0)[None,:]
# Form matters here. If homoskedastic, this can be sigma^2 (X'X)^-1.
# If heteroskedastic, then the form below is fine.
# If HAC, then need to use the HAC covariance of the scores.
# If cluster, should use the cluster-robust covariance.
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
XpX = np.dot(wexog.T,wexog) / n
Sinv = inv(sigma2 * XpX)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
Sinv = inv(np.dot(scores.T,scores) / n)
elif cov_type == 'HAC':
print("HAC")
maxlags = self.cov_kwds['maxlags']
Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
#cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
Sinv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * chain_dot(s,Sinv,s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""use F test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
f_value : float
test statistic, F distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two models.
This is not a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results under the assumption of homoscedasticity
and no autocorrelation (sphericity).
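Examples
--------
A minimal sketch, assuming `res_full` and `res_restr` are fitted OLS
results and `res_restr` is nested in `res_full` (as in the
`compare_lm_test` example):
>>> f_value, p_value, df_diff = res_full.compare_f_test(res_restr)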
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
likelihood ratio, chisquare distributed with df_diff degrees of
freedom
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
The exact likelihood ratio is valid for homoskedastic data, and is
defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\mathcal{L}` is the likelihood of the model. With :math:`D`
distributed as chisquare with df equal to difference in number of
parameters or equivalently difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the residuals from
null model and the regressors from the alternative model and :math:`S`
is the covariance of the scores, :math:`s_{i}`. The covariance of the
scores is estimated using the same estimator as in the alternative model.
This test compares the loglikelihood of the two models.
This may not be a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results without taking unspecified
heteroscedasticity or correlation into account.
TODO: put into separate function, needs tests
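Examples
--------
A minimal sketch, assuming `res_full` and `res_restr` are fitted OLS
results and `res_restr` is nested in `res_full`:
>>> lr_stat, p_value, df_diff = res_full.compare_lr_test(res_restr)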
"""
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`true` if the cov_type is nonrobust, and `false` in all other cases.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of ``cov_type='cluster'``
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
- 'hac-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series for
each panel unit or cluster need to be stacked.
keywords
- `time` array_like (required) : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
Reminder:
`use_correction` in "nw-groupsum" and "nw-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
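Examples
--------
Illustrative sketches; `y`, `X` and the integer array `groups` are
assumed to be supplied by the user and are not part of the API:
>>> import statsmodels.api as sm
>>> res = sm.OLS(y, X).fit()
>>> res_hc3 = res.get_robustcov_results(cov_type='HC3')
>>> res_hac = res.get_robustcov_results(cov_type='HAC', maxlags=4)
>>> res_clu = res.get_robustcov_results(cov_type='cluster', groups=groups)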
"""
import statsmodels.stats.sandwich_covariance as sw
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user didn't explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = ('Standard Errors are based on ' +
'fixed scale')
res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
raise ValueError('heteroscedasticity robust covariance ' +
'does not use keywords')
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'robust ' + '(' + cov_type + ')')
# TODO cannot access cov without calling se first
getattr(self, cov_type.upper() + '_se')
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type == 'HAC':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'and autocorrelation robust (HAC) using %d lags and %s small ' +
'sample correction') % (maxlags, ['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
use_correction=use_correction)
elif cov_type == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-panel':
# heteroscedasticity and autocorrelation robust standard errors in panel data
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()
groupidx = lzip([0] + tt, tt + [len(time)])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(np.diff(time) < 0)[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = (
'Driscoll and Kraay Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(self, exog=exog, transform=transform,
weights=weights, row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in 0, ..., p - 1, where p is the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
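Examples
--------
A minimal sketch, assuming `y` and `X` (with a constant column) are
user-supplied arrays:
>>> import statsmodels.api as sm
>>> results = sm.OLS(y, X).fit()
>>> print(results.summary())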
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
#add warnings/notes, added to text format only
etext =[]
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: #TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
#top = summary_top(self, gleft=topleft, gright=diagn_left, #[],
# yname=yname, xname=xname,
# title=self.model.__class__.__name__ + ' ' +
# "Regression Results")
#par = summary_params(self, yname=yname, xname=xname, alpha=.05,
# use_t=False)
#
#diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="Linear Model")
#
#return summary_return([top, par, diagn], return_fmt=return_fmt)
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function to summarize the regression results
Parameters
-----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from statsmodels.compat.collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) #in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
smry.add_text(warn)
if condno > 1000:
warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
Results class for an OLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
See Also
--------
RegressionResults
"""
def get_influence(self):
"""
get an instance of Influence with influence and outlier measures
Returns
-------
infl : Influence instance
the instance has methods to calculate the main influence and
outlier measures for the OLS regression
See also
--------
:class:`statsmodels.stats.outliers_influence.OLSInfluence`
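Examples
--------
A minimal sketch, assuming `results` is a fitted OLS results instance:
>>> infl = results.get_influence()
>>> cooks_d, cooks_pvals = infl.cooks_distance
>>> frame = infl.summary_frame()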
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05):
"""
Test observations for outliers according to method
Parameters
----------
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
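Examples
--------
A minimal sketch, assuming `results` is a fitted OLS results instance:
>>> table = results.outlier_test(method='bonf', alpha=.05)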
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha)
def el_test(self, b0_vals, param_nums, return_weights=0,
ret_params=0, method='nm',
stochastic_exog=1, return_params=0):
"""
Tests single or joint hypotheses of the regression parameters using
Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested
param_nums : 1darray
The parameter number to be tested
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. Default is False
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
stochastic_exog : bool
When TRUE, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default = TRUE
Returns
-------
res : tuple
-2 times the log-likelihood ratio and the p-value for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
(27.248146353709153, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress([],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,
maxiter=10000, full_output=1, disp=0,
args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,
method='nm', stochastic_exog=1):
"""
Computes the confidence interval for the parameter given by param_num
using Empirical Likelihood
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired
sig : float
The significance level. Default is .05
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
Returns
-------
ci : tuple
The confidence interval
See Also
--------
el_test
Notes
-----
This function uses `brenth` to find the value of beta where
el_test([beta], [param_num])[0] is equal to the critical
value.
The function returns the results of each iteration of the root
finder at each value of beta.
The current function value of the last printed optimization
should be the critical value at the desired significance level.
For alpha=.05, the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to
do el_test([lower_limit], [param_num])
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
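Examples
--------
A minimal sketch, assuming `fitted` is the OLS results instance from
the `el_test` example above; the parameter index is illustrative:
>>> lower, upper = fitted.conf_int_el(1)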
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0]-r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq' : 'columns',
'sresid' : 'rows',
'weights' : 'rows',
'wresid' : 'rows',
'bcov_unscaled' : 'cov',
'bcov_scaled' : 'cov',
'HC0_se' : 'columns',
'HC1_se' : 'columns',
'HC2_se' : 'columns',
'HC3_se' : 'columns',
'norm_resid' : 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_results = OLS(data.endog, data.exog).fit() #results
gls_results = GLS(data.endog, data.exog).fit() #results
print(ols_results.summary())
tables = ols_results.summary(returns='tables')
csv = ols_results.summary(returns='csv')
"""
Summary of Regression Results
=======================================
| Dependent Variable: ['y']|
| Model: OLS|
| Method: Least Squares|
| Date: Tue, 29 Jun 2010|
| Time: 22:32:21|
| # obs: 16.0|
| Df residuals: 9.0|
| Df model: 6.0|
===========================================================================
| coefficient std. error t-statistic prob.|
---------------------------------------------------------------------------
| x1 15.0619 84.9149 0.1774 0.8631|
| x2 -0.0358 0.0335 -1.0695 0.3127|
| x3 -2.0202 0.4884 -4.1364 0.002535|
| x4 -1.0332 0.2143 -4.8220 0.0009444|
| x5 -0.0511 0.2261 -0.2261 0.8262|
| x6 1829.1515 455.4785 4.0159 0.003037|
| const -3482258.6346 890420.3836 -3.9108 0.003560|
===========================================================================
| Models stats Residual stats |
---------------------------------------------------------------------------
| R-squared: 0.995479 Durbin-Watson: 2.55949 |
| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |
| F-statistic: 330.285 Prob(Omnibus): 0.687765 |
| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |
| Log likelihood: -109.617 Prob(JB): 0.838294 |
| AIC criterion: 233.235 Skew: 0.419984 |
| BIC criterion: 238.643 Kurtosis: 2.43373 |
---------------------------------------------------------------------------
"""
| bsd-3-clause |
behzadnouri/scipy | scipy/interpolate/_cubic.py | 8 | 29300 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k` and :math:`d_k = (y_{k+1} - y_k) / h_k`
be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
DOI:10.1137/0717021
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
DOI: http://dx.doi.org/10.1137/1.9780898717952
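Examples
--------
A small, illustrative usage sketch; the data below are made up:
>>> import numpy as np
>>> from scipy.interpolate import PchipInterpolator
>>> x = np.arange(10)
>>> y = np.exp(-x / 3.0)
>>> interp = PchipInterpolator(x, y)
>>> xnew = np.linspace(0, 9, 101)
>>> ynew = interp(xnew)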
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self._bpoly)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives at the points y_k, d_k, by using the
# PCHIP algorithm:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
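Examples
--------
A small, illustrative sketch; the data below are made up:
>>> from scipy.interpolate import pchip_interpolate
>>> xi = [0.0, 1.0, 2.0, 3.0]
>>> yi = [0.0, 1.0, 4.0, 9.0]
>>> y = pchip_interpolate(xi, yi, [0.5, 1.5, 2.5])
>>> dy = pchip_interpolate(xi, yi, [0.5, 1.5, 2.5], der=1)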
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
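Examples
--------
A small, illustrative usage sketch; the data below are made up:
>>> import numpy as np
>>> from scipy.interpolate import Akima1DInterpolator
>>> x = np.linspace(0, 2 * np.pi, 10)
>>> y = np.sin(x)
>>> akima = Akima1DInterpolator(x, y)
>>> y_new = akima(np.linspace(0, 2 * np.pi, 100))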
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` that allows specifying arbitrary
derivatives at the curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and violates only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
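# --- Illustrative sketch (added for exposition; not part of the original
# module). It exercises the boundary conditions validated above through the
# public CubicSpline constructor; the sample data and the helper name
# `_demo_cubic_spline_bc` are arbitrary choices made here.
def _demo_cubic_spline_bc():
    import numpy as np
    x = np.linspace(0, 2 * np.pi, 10)
    y = np.sin(x)
    y[-1] = y[0]  # endpoints must match exactly for bc_type='periodic'
    natural = CubicSpline(x, y, bc_type='natural')    # zero second derivative at the ends
    clamped = CubicSpline(x, y, bc_type='clamped')    # zero first derivative at the ends
    periodic = CubicSpline(x, y, bc_type='periodic')
    mixed = CubicSpline(x, y, bc_type=((1, 1.0), (2, 0.0)))  # (order, value) pairs
    return natural, clamped, periodic, mixed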
| bsd-3-clause |
romeric/Fastor | benchmark/external/benchmark_inverse/benchmark_plot.py | 1 | 1615 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino'],'size':14})
rc('text', usetex=True)
def read_results():
ms, ns, times_eigen, times_fastor = [], [], [], []
with open("benchmark_results.txt", "r") as f:
lines = f.readlines()
for line in lines:
sline = line.split(' ')
if len(sline) == 4:
times_eigen.append(float(sline[1]))
times_fastor.append(float(sline[2]))
elif len(sline) == 7 and "size" in sline[1]:
ms.append(int(sline[4]))
ns.append(int(sline[5]))
return np.array(ms), np.array(ns), np.array(times_eigen), np.array(times_fastor)
def main():
ms, ns, times_eigen, times_fastor = read_results()
fig, ax = plt.subplots()
index = np.arange(len(ms))
bar_width = 0.2
opacity = 0.8
rects1 = plt.bar(index, times_eigen/1e-6, bar_width,
alpha=opacity,
color='#C03B22',
label='Eigen')
rects3 = plt.bar(index + bar_width, times_fastor/1e-6, bar_width,
alpha=opacity,
color='#E98604',
label='Fastor')
xticks = [str(dim[0]) + 'x' + str(dim[1]) for dim in zip(ms,ns)]
plt.xlabel('(M,M)')
    plt.ylabel(r'Time ($\mu$sec)')
plt.title("B = inv(A)")
plt.xticks(index, xticks, rotation=45)
plt.legend()
plt.tight_layout()
plt.grid(True)
    # plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
plt.show()
if __name__ == "__main__":
main() | mit |
sbenthall/bigbang | tests/bigbang_tests.py | 1 | 7419 | from nose.tools import *
from testfixtures import LogCapture
from bigbang import repo_loader
import bigbang.archive as archive
import bigbang.mailman as mailman
import bigbang.parse as parse
import bigbang.process as process
import bigbang.utils as utils
import mailbox
import os
import networkx as nx
import pandas as pd
from config.config import CONFIG
test_txt = ""
TEMP_DIR = os.path.join(CONFIG.test_data_path, "tmp")
def test_git_dependancy():
repo = repo_loader.get_repo("https://github.com/sbenthall/bigbang.git", in_type = "remote")
def setup():
try:
os.mkdir(TEMP_DIR)
except OSError: # Python 2.7-specific, alas; FileExistsError in py3
pass # temporary directory already exists, that's cool
def teardown():
# remove all files in the temporary files directory, as cleanup
temp_files = os.listdir(TEMP_DIR)
for f in temp_files:
os.remove(os.path.join(TEMP_DIR, f))
def test_split_references():
refs = " <ye1y9ljtxwk.fsf@orange30.ex.ac.uk>\n\t<055701c16727$b57fed90$8fd6afcf@pixi.com>"
split = parse.split_references(refs)
assert len(split) == 2, split
def test_mailman_chain():
name = "bigbang-dev-test.txt"
#archive loaded from mbox
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
arx.save("test.csv")
#archive loaded from stored csv
arx2 = archive.load("test.csv")
print arx.data.dtypes
print arx.data.shape
assert arx.data.shape == arx2.data.shape, \
"Original and restored archives are different shapes"
assert (arx2.data.index == arx.data.index).all(), \
"Original and restored archives have nonidentical indices"
assert [t.get_num_messages() for t in arx.get_threads()] == [3,1,2], \
"Thread message count in mbox archive is off"
assert [t.get_num_messages() for t in arx2.get_threads()] == [3,1,2], \
"Thread message count in restored archive is off"
# smoke test entity resolution
arx2.resolve_entities()
os.remove("test.csv")
def test_clean_message():
name = "2001-November.txt"
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
body = arx.data['Body'][ '<E165uMn-0002IJ-00@spock.physics.mcgill.ca>']
assert "But seemingly it is even stranger than this." in body, \
"Selected wrong message"
assert "Is it a problem of lapack3.0 of of" in body, \
"Quoted text is not in uncleaned message"
assert "Is it a problem of lapack3.0 of of" not in utils.clean_message(body), \
"Quoted text is in cleaned message"
def test_from_header_distance():
a = 'Fernando.Perez at colorado.edu (Fernando.Perez at colorado.edu)'
b = 'Fernando.Perez at colorado.edu (Fernando.Perez@colorado.edu)'
assert process.from_header_distance(a,b) == 0, \
"from_header_distance computing incorrect value"
a = ''
b = ''
assert True, \
"from_header_distance computing incorrect value"
def test_email_entity_resolution():
name = "2001-November.txt"
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
e = process.resolve_sender_entities(arx.get_activity(resolved=False))
eact = utils.repartition_dataframe(arx.get_activity(),e)
assert True, "email entity resolution crashed"
def test_labeled_blockmodel():
g = nx.DiGraph()
g.add_edge(0,1)
g.add_edge(0,2)
g.add_edge(0,3)
g.add_edge(0,4)
p = {'B': [1,2,3,4], 'A': [0]}
bg = utils.labeled_blockmodel(g,p)
assert list(bg.edges(data=True))[0][2]['weight'] == 4.0, \
"Incorrect edge weight in labeled blockmodel"
assert list(bg.edges()) == [('A','B')], \
"Incorrected edges in labeled blockmodel"
def test_valid_urls():
test_urls_path = os.path.join(CONFIG.test_data_path, 'urls-test-file.txt')
with LogCapture() as l:
urls = mailman.urls_to_collect(test_urls_path)
assert "#ignored" not in urls, "failed to ignore a comment line"
assert "http://www.example.com/1" in urls, "failed to find valid url"
assert "http://www.example.com/2/" in urls, "failed to find valid url, whitespace strip issue"
assert "https://www.example.com/3/" in urls, "failed to find valid url, whitespace strip issue"
assert "invalid.com" not in urls, "accepted invalid url"
assert len(l.actual()) == 2, "wrong number of log entries"
for (fromwhere, level, msg) in l.actual():
assert level == "WARNING", "logged something that wasn't a warning"
assert len(urls) == 3, "wrong number of urls parsed from file"
def test_empty_list_compute_activity_issue_246():
test_df_csv_path = os.path.join(CONFIG.test_data_path, 'empty-archive-df.csv')
df = pd.read_csv(test_df_csv_path)
with assert_raises(mailman.MissingDataException):
empty_archive = archive.Archive(df)
activity = empty_archive.get_activity()
def test_mailman_normalizer():
browse_url = 'https://mailarchive.ietf.org/arch/browse/ietf/'
search_url = 'https://mailarchive.ietf.org/arch/search/?email_list=ietf'
random_url = 'http://example.com'
better_url = 'https://www.ietf.org/mail-archive/text/ietf/'
assert mailman.normalize_archives_url(browse_url) == better_url, "failed to normalize"
assert mailman.normalize_archives_url(search_url) == better_url, "failed to normalize"
assert mailman.normalize_archives_url(random_url) == random_url, "should not have changed other url"
def test_mailman_list_name():
ietf_archive_url = 'https://www.ietf.org/mail-archive/text/ietf/'
w3c_archive_url = 'https://lists.w3.org/Archives/Public/public-privacy/'
random_url = 'http://example.com'
assert mailman.get_list_name(ietf_archive_url) == 'ietf', "failed to grab ietf list name"
assert mailman.get_list_name(w3c_archive_url) == 'public-privacy', "failed to grab w3c list name"
assert mailman.get_list_name(random_url) == random_url, "should not have changed other url"
def test_activity_summary():
list_url = 'https://lists.w3.org/Archives/Public/test-activity-summary/'
activity_frame = mailman.open_activity_summary(list_url, archive_dir=CONFIG.test_data_path)
assert str(type(activity_frame)) == "<class 'pandas.core.frame.DataFrame'>", "not a DataFrame?"
assert len(activity_frame.columns) == 1, "activity summary should have one column"
def test_provenance():
test_list_name = 'test-list-name'
test_list_url = 'https://example.com/test-list-url/'
test_notes = 'Test notes.'
mailman.populate_provenance(TEMP_DIR, list_name=test_list_name, list_url=test_list_url, notes=test_notes)
assert os.path.exists(os.path.join(TEMP_DIR, mailman.PROVENANCE_FILENAME)), "provenance file should have been created"
provenance = mailman.access_provenance(TEMP_DIR)
assert provenance != None, "provenance should be something"
assert provenance['list']['list_name'] == test_list_name, "list name should be in the provenance"
assert provenance['list']['list_url'] == test_list_url, "list url should be in the provenance"
assert provenance['notes'] == test_notes, "notes should be in the provenance"
provenance['notes'] = 'modified provenance'
mailman.update_provenance(TEMP_DIR, provenance)
provenance_next = mailman.access_provenance(TEMP_DIR)
assert provenance_next['notes'] == 'modified provenance', "confirm modified provenance" | agpl-3.0 |
CodeReclaimers/neat-python | examples/xor/visualize.py | 1 | 5915 | from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
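# Illustrative sketch (added for exposition; not part of the original file):
# a convenience wrapper showing how the helpers above are typically combined
# after a run. `config`, `winner` and `stats` are assumed to come from
# neat.Population.run() and an attached neat.StatisticsReporter; the wrapper
# name `visualize_run` is hypothetical.
def visualize_run(config, winner, stats, node_names=None):
    draw_net(config, winner, view=False, filename="winner-net", node_names=node_names)
    plot_stats(stats, ylog=False, view=False, filename="avg_fitness.svg")
    plot_species(stats, view=False, filename="speciation.svg")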
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/utils/extmath.py | 16 | 26642 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)), but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=None,
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter: int (default is 4)
Number of power iterations. It can be used to deal with very noisy
problems. When `n_components` is small (< .1 * min(X.shape)) `n_iter`
is set to 7, unless the user specifies a higher number. This improves
precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter is None:
        # Checks if the number of iterations is explicitly specified
n_iter = 4
n_iter_specified = False
else:
n_iter_specified = True
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
# Adjust n_iter. 7 was found a good compromise for PCA. See #5299
if n_components < .1 * min(M.shape) and n_iter < 7:
if n_iter_specified:
warnings.warn("The number of power iterations is increased to "
"7 to achieve higher precision.")
n_iter = 7
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
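# Illustrative sketch (added for exposition; not part of scikit-learn):
# randomized_svd on a small dense matrix. The sizes and the helper name
# `_demo_randomized_svd` are arbitrary choices; agreement with the exact SVD
# is only approximate because the algorithm is randomized.
def _demo_randomized_svd():
    rng = np.random.RandomState(0)
    M = rng.randn(100, 20)
    U, s, V = randomized_svd(M, n_components=5, random_state=0)
    # The leading singular values should be close to those of the exact SVD.
    s_exact = linalg.svd(M, full_matrices=False)[1][:5]
    return s, s_exact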
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
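# Illustrative sketch (added for exposition; not part of scikit-learn):
# svd_flip fixes the sign convention of an SVD without changing the
# reconstruction. The helper name `_demo_svd_flip` and the matrix size are
# arbitrary choices.
def _demo_svd_flip():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = linalg.svd(X, full_matrices=False)
    U2, V2 = svd_flip(U.copy(), V.copy())
    # The product np.dot(U2 * s, V2) still reconstructs X after the flips.
    assert np.allclose(np.dot(U2 * s, V2), X)
    return U2, V2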
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
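# Illustrative sketch (added for exposition; not part of scikit-learn): for
# moderate inputs the stable implementation matches the naive formula; the
# helper name `_demo_log_logistic` is arbitrary.
def _demo_log_logistic():
    x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
    stable = log_logistic(x)
    naive = -np.log(1.0 + np.exp(-x))  # overflows for very negative inputs
    assert np.allclose(stable, naive)
    return stable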
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
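# Illustrative sketch (added for exposition; not part of scikit-learn): each
# row of the softmax output is a probability distribution, even for inputs
# large enough to overflow a naive np.exp. The helper name is arbitrary.
def _demo_softmax():
    X = np.array([[1.0, 2.0, 3.0],
                  [1000.0, 1001.0, 1002.0]])  # exercises the max-subtraction trick
    P = softmax(X)
    assert np.allclose(P.sum(axis=1), 1.0)
    return P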
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is
    the number of samples encountered so far.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
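# Illustrative sketch (added for exposition; not part of scikit-learn):
# feeding two batches through _incremental_mean_and_var reproduces the mean
# and variance of the concatenated data. The batch sizes and the helper name
# are arbitrary choices.
def _demo_incremental_mean_and_var():
    rng = np.random.RandomState(0)
    A, B = rng.randn(30, 3), rng.randn(20, 3)
    mean, var, n = _incremental_mean_and_var(A, 0., np.zeros(3), 0)
    mean, var, n = _incremental_mean_and_var(B, mean, var, n)
    X = np.vstack([A, B])
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mean, var, n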
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
imaculate/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/__init__.py | 5 | 2092 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning. See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
zorojean/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N      Y      Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
franciscomoura/data-science-and-bigdata | introducao-linguagens-estatisticas/mineracao-dados-python/codigo-fonte/code-06.py | 1 | 2285 | # -*- coding: utf-8 -*-
# code-06.py
"""
Dependencies: Matplotlib, NumPy
Run at the prompt: pip install matplotlib
Run at the prompt: pip install numpy
Run at the prompt: pip install scikit-learn
Run at the prompt: pip install scipy
*** Note:
This file must be run in the same directory as the iris.csv file
"""
import numpy as np
# read the first 4 columns
data = np.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
# read the fifth (last) column
target_names = np.genfromtxt('iris.csv', delimiter=',', usecols=(4), dtype=str)
# convert the vector of class-name strings into integers
target = np.zeros(len(target_names), dtype=np.int)
target[target_names == 'setosa'] = 0
target[target_names == 'versicolor'] = 1
target[target_names == 'virginica'] = 2
# part 1
from sklearn.cluster import KMeans
# fixed initialization so the clustering shows the same result on every run
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=3425)
kmeans.fit(data)
# part 2
clusters = kmeans.predict(data)
# part 3
print("Completeness and homogeneity:")
from sklearn.metrics import completeness_score, homogeneity_score
print(completeness_score(target, clusters))
# Output: 0.764986151449
print(homogeneity_score(target, clusters))
# Output: 0.751485402199
# part 4 - revised
print("Generating the scatter plot")
import pylab as pl
pl.figure()
pl.subplot(211) # top: figure with the true classes
pl.plot(data[target == 0, 2], data[target == 0, 3], 'bo', alpha=.7) # 0 setosa
pl.plot(data[target == 1, 2], data[target == 1, 3], 'ro', alpha=.7) # 1 versicolor
pl.plot(data[target == 2, 2], data[target == 2, 3], 'go', alpha=.7) # 2 virginica
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.subplot(212) # bottom: figure with the automatically assigned classes
pl.plot(data[clusters == 0, 2], data[clusters == 0, 3], 'go', alpha=.7) # cluster 0 virginica
pl.plot(data[clusters == 1, 2], data[clusters == 1, 3], 'bo', alpha=.7) # cluster 1 setosa
pl.plot(data[clusters == 2, 2], data[clusters == 2, 3], 'ro', alpha=.7) # cluster 2 versicolor
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.show()
| apache-2.0 |
keshr3106/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
softwaresaved/SSINetworkGraphics | Fellows/Python/map_fellows_network.py | 1 | 3548 | import os
import ast
import requests, gspread
import numpy as np
import matplotlib.pyplot as plt
from oauth2client.client import SignedJwtAssertionCredentials
from mpl_toolkits.basemap import Basemap
#Google Authorisation section and getting a worksheet from Google Spreadsheet
def authenticate_google_docs():
f = file(os.path.join('SSI Network Graphics-3357cb9f30de.p12'), 'rb')
SIGNED_KEY = f.read()
f.close()
scope = ['https://spreadsheets.google.com/feeds', 'https://docs.google.com/feeds']
credentials = SignedJwtAssertionCredentials('devasena.prasad@gmail.com', SIGNED_KEY, scope)
data = {
'refresh_token' : '1/NM56uCG7uFT6VVAAYX3B5TbcMk43wn1xE8Wr-7dsb7lIgOrJDtdun6zK6XiATCKT',
'client_id' : '898367260-pmm78rtfct8af7e0utis686bv78eqmqs.apps.googleusercontent.com',
'client_secret' : 'Cby-rjWDg_wWTSQw_8DDKb3v',
'grant_type' : 'refresh_token',
}
r = requests.post('https://accounts.google.com/o/oauth2/token', data = data)
credentials.access_token = ast.literal_eval(r.text)['access_token']
gc = gspread.authorize(credentials)
return gc
gc_ret = authenticate_google_docs()
sh = gc_ret.open_by_url('https://docs.google.com/spreadsheets/d/13_ZIdeF7oS0xwp_nhGRoVTv7PaXvfLMwVxvgt_hNOkg/edit#gid=383409775')
worksheet_list = sh.worksheets() # Get list of worksheets
#Print the names of first and second worksheets
print "First 2 worksheets of Fellows data Google spreadsheet are:", worksheet_list[0], worksheet_list[1]
# Get all values from the first, seventh and eight columns of Sample datset
values_list_names = worksheet_list[0].col_values(1)
destination_lat_values = worksheet_list[0].col_values(7)
destination_lon_values = worksheet_list[0].col_values(8)
print "Names of SSI fellows are:",values_list_names
print "Destination Latitude values are:",destination_lat_values
print "Destination Longitude values are:", destination_lon_values
# get all values from first, fourth and fifth columns of Home Institutions worksheet
fellows_list_names = worksheet_list[1].col_values(1)
home_lat_values = worksheet_list[1].col_values(4)
home_lon_values = worksheet_list[1].col_values(5)
print "Names of SSI fellows are:",fellows_list_names
print "Home Institution Latitude values are:",home_lat_values
print "Home Institution Longitude values are:", home_lon_values
# create new figure, axes instances.
fig=plt.figure()
ax=fig.add_axes([0.1,0.1,0.8,0.8])
# setup mercator map projection.
m = Basemap(llcrnrlon=-150.,llcrnrlat=-40.,urcrnrlon=150.,urcrnrlat=80.,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=40.,lon_0=-20.,lat_ts=20.)
#Plotting fellows routes on map
print "No. of unique fellows are:", (len(worksheet_list[1].col_values(1))-1)
colcode = ['b','r','g','y','m','c','k','w']
i = 1
j = 1
print "No. of destination entries in the Sample datasheet:", (len(worksheet_list[0].col_values(7))-1)
while i < len(worksheet_list[1].col_values(1)):
    j = 1  # reset the destination index so every fellow's routes are drawn
    while j < len(worksheet_list[0].col_values(7)):
m.drawgreatcircle(float(home_lon_values[i]),float(home_lat_values[i]),float(destination_lon_values[j]),float(destination_lat_values[j]),linewidth=2,color=colcode[i-1])
j = j + 1
i = i + 1
#label=fellows_list_names[i]
m.drawcoastlines()
m.fillcontinents()
# draw parallels
m.drawparallels(np.arange(10,90,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
ax.set_title('SSI Fellows Impact')
plt.legend()
plt.show()
| bsd-3-clause |
jballanc/openmicroscopy | components/tools/OmeroPy/src/omero/install/logs_library.py | 5 | 6661 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Function for parsing OMERO log files.
The format expected is defined for Python in
omero.util.configure_logging.
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
:author: Josh Moore <josh@glencoesoftware.com>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
from time import mktime, strptime
import fileinput
import logging
import sys
import os
import re
def parse_time(value):
"""
parse the time format used by log4j into seconds (float)
since the epoch
"""
parts = value.split(",")
value = parts[0]
millis = float(parts[1]) / 1000.0
t = mktime(strptime(value, "%Y-%m-%d %H:%M:%S"))
t = float(t)
t += millis
return t
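# Illustrative sketch (added for exposition; not part of the original
# module): parse_time turns the log4j timestamp prefix used in these logs
# into epoch seconds as a float, keeping the millisecond part after the
# comma. The helper name `_demo_parse_time` is arbitrary.
def _demo_parse_time():
    t = parse_time("2009-04-09 15:11:58,029")
    # t is the local-time epoch value for 2009-04-09 15:11:58 plus 0.029 s
    return t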
class log_line(object):
"""
2009-04-09 15:11:58,029 INFO [ ome.services.util.ServiceHandler] (l.Server-6) Meth: interface ome.api.IQuery.findByQuery
01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
"""
def __init__(self, line):
self.line = line
line.strip()
self.date = line[0:23]
self.level = line[24:28]
self.thread = line[74:84]
self.message = line[85:].strip()
self.status = line[86:91]
self.method = line[96:].strip()
def contains(self, s):
return 0 <= self.line.find(s)
def contains_any(self, l):
for i in l:
if self.contains(i):
return True
return False
class log_watcher(object):
def __init__(self, files, entries, exits, storeonce = None, storeall = None):
if storeonce is None: storeonce = []
if storeall is None: storeall = []
self.files = files
self.entries = entries
self.exits = exits
self.storeonce = storeonce
self.storeall = storeall
def gen(self):
self.m = {}
try:
for line in fileinput.input(self.files):
ll = log_line(line)
if ll.contains_any(self.entries):
self.m[ll.thread] = ll
elif ll.contains_any(self.storeonce):
try:
value = self.m[ll.thread]
try:
value.once
except:
value.once = ll
except KeyError:
logging.debug("Not found: " + line)
elif ll.contains_any(self.storeall):
try:
value = self.m[ll.thread]
value.all.append(ll)
except AttributeError:
value.all = [ll]
except KeyError:
logging.debug("Not found: " + line)
elif ll.contains_any(self.exits):
try:
value = self.m[ll.thread]
del self.m[ll.thread] # Free memory
value.start = parse_time(value.date)
value.stop = parse_time(ll.date)
value.took = value.stop - value.start
yield value
except KeyError:
logging.debug("Not found: " + line)
finally:
fileinput.close()
class allthreads_watcher(log_watcher):
def __init__(self, files):
log_watcher.__init__(self, files, ["Meth:","Executor.doWork"],["Rslt:","Excp:"])
class saveAndReturnObject_watcher(log_watcher):
def __init__(self, files):
log_watcher.__init__(self, files, ["saveAndReturnObject"],["Rslt:","Excp:"],storeonce=["Args:"],storeall=["Adding log"])
# http://matplotlib.sourceforge.net/examples/api/line_with_text.html
class MyLine(lines.Line2D):
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
self.text = mtext.Text(0, 0, '')
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position((x[-1], y[-1]))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
def plot_threads(watcher, all_colors = ("blue","red","yellow","green","pink","purple")):
digit = re.compile(".*(\d+).*")
fig = plt.figure()
ax = fig.add_subplot(111)
first = None
last = None
colors = {}
for ll in watcher.gen():
last = ll.stop
if first is None:
first = ll.start
if ll.thread.strip() == "main":
t = -1
else:
try:
t = digit.match(ll.thread).group(1)
except:
print "Error parsing thread:", ll.thread
raise
y = np.array([int(t),int(t)])
x = np.array([ll.start-first, ll.stop-first])
c = colors.get(t,all_colors[0])
i = all_colors.index(c)
colors[t] = all_colors[ (i+1) % len(all_colors) ]
if True:
line = MyLine(x, y, c=c, lw=2, alpha=0.5)#, mfc='red')#, ms=12, label=str(len(ll.logs)))
#line.text.set_text('line label')
line.text.set_color('red')
#line.text.set_fontsize(16)
ax.add_line(line)
else:
# http://matplotlib.sourceforge.net/examples/pylab_examples/broken_barh.html
ax.broken_barh([ (110, 30), (150, 10) ] , (10, 9), facecolors='blue')
ax.set_ylim(-2,25)
ax.set_xlim(0, (last-first))
plt.show()
if __name__ == "__main__":
for g in allthreads_watcher(sys.argv).gen():
print "Date:%s\nElapsed:%s\nLevel:%s\nThread:%s\nMethod:%s\nStatus:%s\n\n" % (g.date, g.took, g.level, g.thread, g.message, g.status)
| gpl-2.0 |
hdmetor/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
        # Only one penalty: we can solve the multi-target problem in one pass.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
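# Hedged numerical sketch (not part of scikit-learn): on a small dense problem
# the direct solvers above compute the same ridge solution, so they should
# agree to numerical precision.
def _example_solvers_agree(alpha=1.0):
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    y = rng.randn(10, 1)
    alphas = np.array([alpha])
    return np.allclose(_solve_cholesky(X, y, alphas), _solve_svd(X, y, alphas))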
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
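# Hedged usage sketch (not part of scikit-learn): a minimal call to
# ridge_regression() on random data, purely for illustration.
def _example_ridge_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    # one scalar penalty; the returned coef has shape (n_features,)
    return ridge_regression(X, y, alpha=1.0, solver='cholesky')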
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
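# Hedged usage sketch (not part of scikit-learn): RidgeClassifier on a tiny
# two-class problem, mirroring the Ridge docstring example above.
def _example_ridge_classifier():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 4)
    y = (X[:, 0] > 0).astype(int)  # binary labels derived from the data
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    return clf.predict(X), clf.classes_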
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
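# Hedged numerical sketch (not part of scikit-learn): checks the identity from
# the _RidgeGCV notes -- LOO errors equal dual_coef / diag(G) with
# G = (K + alpha*I)^-1 -- against an explicit inverse on a tiny problem.
def _example_loo_identity(alpha=1.0):
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    gcv = _RidgeGCV(alphas=[alpha])
    v, Q, QT_y = gcv._pre_compute(X, y)
    errors, c = gcv._errors(alpha, y, v, Q, QT_y)
    G = linalg.inv(np.dot(X, X.T) + alpha * np.eye(X.shape[0]))
    looe = np.dot(G, y) / np.diag(G)
    return np.allclose(errors, looe ** 2)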
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
        'auto' : use svd if n_samples > n_features and X is not sparse,
            otherwise use eigen
        'svd' : force computation via singular value decomposition of X
            (does not work for sparse matrices)
        'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
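# Hedged usage sketch (not part of scikit-learn): RidgeCV selecting a penalty
# by generalized cross-validation on random data.
def _example_ridge_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    y = np.dot(X, rng.randn(5)) + 0.1 * rng.randn(30)
    reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    # reg.alpha_ is the selected penalty, reg.coef_ the fitted weights
    return reg.alpha_, reg.coef_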
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
YinongLong/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)  # per-sample Gaussian noise
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
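# Hedged extension sketch (not part of the original example): alpha was fixed
# at 0.1 above; LassoCV can select it by cross-validation instead.
def _example_lasso_cv():
    from sklearn.linear_model import LassoCV
    lcv = LassoCV(cv=3).fit(X_train, y_train)
    # lcv.alpha_ is the chosen penalty; score() reports R^2 on held-out data
    return lcv.alpha_, lcv.score(X_test, y_test)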
| bsd-3-clause |
garrettkatz/directional-fibers | dfibers/experiments/levy_opt/levy_opt.py | 1 | 6952 | """
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
with open("%s_s%d.npz"%(basename,sample), 'w') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
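# Hedged usage sketch (not part of the experiment): a tiny serial run with a
# short timeout, useful for smoke-testing the pipeline. The basename is a
# placeholder assumption.
def _example_smoke_test():
    run_experiment("levy_smoke", num_samples=2, timeout=10, num_procs=0)
    return compile_results("levy_smoke", 2)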
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
| mit |
pedro-aaron/stego-chi-2 | embeddingRgb.py | 1 | 2081 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def marcarPixel(color, bitporinsertar):
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
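# Hedged example (not in the original script): marcarPixel forces the least
# significant bit of a channel value to equal the bit being embedded.
def _example_marcar_pixel():
    assert marcarPixel(200, 1) == 201  # 200 is even, embedding 1 -> 201
    assert marcarPixel(201, 0) == 200  # 201 is odd, embedding 0 -> 200
    assert marcarPixel(200, 0) == 200  # LSB already matches, unchanged
    return True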
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
    ax1.set_title('RGB image')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark parameters
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# marking process
imgMarcada = imgOriginal.copy()
cont = 1  # counter of embedded bits
# embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Percentage of the image marked: ' + str(porcentajeDeimagenPorMarcar) + '%')
print('Embedded bits: ' + str(sizeMarca*3))
print('Embedded bytes: ' + str(sizeMarca*3/8))
print('Embedded kilobytes: ' + str(sizeMarca*3/8/1024))
print('Embedded megabytes: ' + str(sizeMarca*3/8/1024/1024))
| mit |
akshaykr/oracle_cb | RegretExp.py | 1 | 6615 | import numpy as np
import sklearn.linear_model
import sklearn.tree
import Simulators, Logger, Evaluators, Semibandits, Metrics
import warnings
import argparse
import pickle
import sys
class RegretExp(object):
def __init__(self, weight=None, link="linear", K=10, L=5, T=1000, dataset="synth", feat_noise=0.25, reward_noise=1.0, policies="finite", structure='none'):
self.T = T
self.K = K
self.L = L
        if weight is None:
weight = np.arange(1,self.L+1)
self.weight = weight
self.link = link
self.feat_noise = feat_noise
self.reward_noise = reward_noise
self.dataset = dataset
self.policies = policies
self.structure = structure
if self.dataset == "synth":
print("----Generating Semibandit Simulator----")
self.Sim = Simulators.OrderedSBSim(100,100,self.K,
self.L,self.feat_noise,
w_vec=self.weight,
link=self.link,
one_pass=False)
print("----Done----")
elif self.dataset == "mq2007":
print("----Generating MQ2007 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2007',
metric=Metrics.NavigationalTTS,
## metric=Metrics.NDCG,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2007_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == "mq2008":
print("----Generating MQ2008 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2008',
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2008_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == 'yahoo':
print("----Generating Yahoo Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='yahoo',
## metric=Metrics.NDCG,
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./yahoo_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
else:
print("Error invalid dataset")
sys.exit(1)
def run_alg(self, Alg, params={}):
A = Alg(self.Sim)
(reward, regret) = A.play(self.T,params=params,verbose=False)
return (reward, regret)
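# Hedged usage sketch (not part of the original experiment): one short run of a
# single algorithm on the synthetic simulator, mirroring the __main__ block
# below; the Semibandits API and parameter choices are assumed from that block.
def _example_single_run():
    exp = RegretExp(T=100, K=10, L=5, dataset="synth")
    params = {'reward': False, 'weight': np.arange(1, 6)[::-1], 'link': 'linear'}
    reward, regret = exp.run_alg(Semibandits.EpsGreedy, params=params)
    return reward[-1], regret[-1]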
if __name__=='__main__':
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('--T', action='store',
default=1000,
help='number of rounds')
parser.add_argument('--link', action='store', choices=['linear', 'logistic'], default='linear')
parser.add_argument('--dataset', action='store', choices=['synth','mq2007','mq2008', 'yahoo'])
parser.add_argument('--policies', action='store', choices=['finite', 'tree', 'linear'], default='linear')
parser.add_argument('--K', action='store', default=10)
parser.add_argument('--L', action='store', default=5)
parser.add_argument('--structure', action='store', default='none', choices=['none','cluster'])
Args = parser.parse_args(sys.argv[1:])
print(Args)
Args.T = int(Args.T)
Args.K = int(Args.K)
Args.L = int(Args.L)
weight = np.arange(1,Args.L+1)[::-1] ## np.arange(1,Args.L+1,1)[::-1] ## /np.sum(np.arange(1,Args.L+1))
Algs = {
## 'EELS': Semibandits.EELS,
'EELS2': Semibandits.EELS2,
## 'Eps': Semibandits.EpsGreedy,
'EpsOracle': Semibandits.EpsGreedy,
## 'Random': Semibandits.Semibandit
}
Params = {
'EELS': {
'link': Args.link,
},
'EELS2': {
'link': Args.link,
},
'Eps': {
'reward': True,
},
'EpsOracle': {
'reward': False,
'weight': weight,
'link': Args.link
},
'Random': {}
}
if Args.dataset != "synth" and Args.policies == 'tree':
Params['EELS']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EELS2']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['Eps']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EpsOracle']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
if Args.dataset != "synth" and Args.policies == 'linear':
Params['EELS']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EELS2']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['Eps']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EpsOracle']['learning_alg'] = sklearn.linear_model.LinearRegression
Out = {
'EELS': [],
'EELS_regret': [],
'EELS2': [],
'EELS2_regret': [],
'Eps': [],
'Eps_regret': [],
'EpsOracle': [],
'EpsOracle_regret': [],
'Random': [],
'Random_regret': []
}
Exp = RegretExp(weight = weight, link=Args.link, K=Args.K, L=Args.L, T=Args.T, dataset=Args.dataset, policies=Args.policies,structure=Args.structure)
for i in range(10):
print('----Iter %d----' % (i))
for (k,v) in Algs.items():
print('----Running %s with params %s----' % (k, Params[k]))
(reward, regret) = Exp.run_alg(v, params=Params[k])
Out[k].append(reward)
Out[k+"_regret"].append(regret)
print('%s final: %0.3f' % (k, reward[-1]))
pickle.dump(Out, open("./data/%s_%s_%s_link=%s_T=%d_K=%d_L=%d.pkl" %(Args.dataset, Args.policies, Args.structure, Args.link, Args.T, Args.K, Args.L), "wb"))
| mit |
DeercoderResearch/0.5-CoCo | PythonAPI/getFoodImage.py | 2 | 3639 | from pycocotools.coco import COCO
from write_xml import write_to_file
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import os
import shutil
dataDir='..'
dataType='val2014'
annFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
coco=COCO(annFile) # load database
cats=coco.loadCats(coco.getCatIds())
print cats
foodCategory = []
foodCategoryId = []
foodImageId = []
for cat in cats:
if cat['supercategory'] == 'food':
foodCategory.append(cat['name'])
print cat['name']
foodCategoryId = coco.getCatIds(foodCategory)
foodImageId = coco.getImgIds(catIds=foodCategoryId) #must add catIds=
#print len(foodImageId)
dstdir = './JPEGImages/'
# Get all food images and copy them to JPEGImages folders.(#JPEGImage#)
for cat in range(0, len(foodImageId)):
img = coco.loadImgs(foodImageId[cat])[0]
img_name = '%s/images/%s/%s'%(dataDir,dataType,img['file_name'])
#print img_name
shutil.copy(img_name, dstdir)
# Generate the SegmentationObject/SegmentationClass (#Segmentation#)
dstdir = './SegmentationClass'
dstdir_2 = './SegmentationObject'
# Generate the configuration files for Annotation folders(#Annotation#)
# Could be moved above to share the loop over image names.
for cat in range(0, len(foodImageId)):
img = coco.loadImgs(foodImageId[cat])[0]
img_name = os.path.splitext(img['file_name'])[0]
img_annotation_xml_name ='./Annotations/%s.xml'%(img_name)
img_annotation_jpg_name ='./JPEGImages/%s.jpg'%(img_name)
# print img_annotation_xml_name
file = open(img_annotation_xml_name, "wb")
# def write_to_file(img_name,food_type, file_name, img_width, img_height,left_x, left_y, right_x, right_y):
img_width = img['width']
img_height = img['height']
## Now load annotation in order to get bbox, food type
ann_id = coco.getAnnIds(imgIds=img['id'])
print "ann_id"
print ann_id
## Note: for one image, there are multiple labels, find the food_label
ann = coco.loadAnns(ann_id)
for ann_food in ann:
ann_cat_id = ann_food['category_id']
ann_cat = coco.loadCats(ann_cat_id)[0]
if ann_cat['supercategory'] == 'food':
print ann_cat['name']
food_ann = ann_food
break
print "annotation"
print ann_food
bbox = ann_food['bbox']
catId = ann_food['category_id']
cat = coco.loadCats(catId)[0]
left_x = bbox[0]
left_y = bbox[1]
right_x = left_x + bbox[2]
right_y = left_y + bbox[3]
food_type = cat['name']
print img_annotation_jpg_name
    # copy the image into the folder matching its food category
    food_dirs = {'donut': './donut/', 'cake': './cake/', 'hot dog': './hotdog/',
                 'sandwich': './sandwich/', 'carrot': './carrot/',
                 'apple': './apple/', 'orange': './orange/',
                 'banana': './banana/', 'pizza': './pizza/'}
    if food_type in food_dirs:
        shutil.copy(img_annotation_jpg_name, food_dirs[food_type])
write_to_file(img_annotation_jpg_name, food_type, img_annotation_xml_name, str(img_width), str(img_height), str(left_x), str(left_y), str(right_x), str(right_y))
file.close()
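# Hedged helper sketch (not in the original script): COCO stores boxes as
# [x, y, width, height]; the loop above converts them to corner coordinates,
# which this small helper makes explicit.
def _bbox_to_corners(bbox):
    x, y, w, h = bbox
    return x, y, x + w, y + h  # left_x, left_y, right_x, right_y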
### ??????
# Generate the configuration for ImageSet
img = coco.loadImgs(foodImageId[5])[0]
img_name = '%s/images/%s/%s'%(dataDir,dataType,img['file_name'])
I = io.imread(img_name)
#plt.figure()
#plt.imshow(I)
#plt.show()
# JUST FOR DEBUGGING
print foodCategory
print foodCategoryId
print foodImageId
print img_name
| bsd-2-clause |
planetarymike/IDL-Colorbars | IDL_py_test/018_Pastels.py | 1 | 5628 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 0., 0.282353],
[1., 0., 0.282353],
[1., 0., 0.290196],
[1., 0., 0.298039],
[1., 0., 0.305882],
[1., 0., 0.313725],
[1., 0., 0.321569],
[1., 0., 0.329412],
[1., 0., 0.337255],
[1., 0., 0.345098],
[1., 0., 0.352941],
[1., 0., 0.356863],
[1., 0., 0.364706],
[1., 0., 0.372549],
[1., 0., 0.380392],
[1., 0., 0.388235],
[1., 0., 0.396078],
[1., 0., 0.403922],
[1., 0., 0.411765],
[1., 0., 0.419608],
[1., 0., 0.427451],
[1., 0., 0.435294],
[1., 0., 0.443137],
[1., 0., 0.45098],
[1., 0., 0.458824],
[1., 0., 0.466667],
[1., 0., 0.47451],
[1., 0., 0.482353],
[1., 0., 0.490196],
[1., 0., 0.498039],
[1., 0., 0.505882],
[1., 0., 0.513725],
[1., 0., 0.521569],
[1., 0., 0.529412],
[1., 0., 0.537255],
[1., 0., 0.545098],
[1., 0., 0.552941],
[1., 0., 0.556863],
[1., 0., 0.564706],
[1., 0., 0.572549],
[1., 0., 0.580392],
[1., 0., 0.588235],
[1., 0., 0.596078],
[1., 0., 0.603922],
[1., 0., 0.611765],
[1., 0., 0.619608],
[1., 0., 0.627451],
[1., 0., 0.635294],
[1., 0., 0.643137],
[1., 0., 0.65098],
[1., 0., 0.658824],
[1., 0., 0.666667],
[1., 0., 0.67451],
[1., 0., 0.682353],
[1., 0., 0.690196],
[1., 0., 0.698039],
[1., 0., 0.705882],
[1., 0., 0.713725],
[1., 0., 0.721569],
[1., 0., 0.729412],
[1., 0., 0.737255],
[1., 0., 0.745098],
[1., 0., 0.74902],
[1., 0., 0.756863],
[1., 0., 0.764706],
[1., 0., 0.772549],
[1., 0., 0.780392],
[1., 0., 0.788235],
[1., 0., 0.796078],
[1., 0., 0.803922],
[1., 0., 0.811765],
[1., 0., 0.819608],
[1., 0., 0.827451],
[1., 0., 0.835294],
[1., 0., 0.843137],
[1., 0., 0.85098],
[1., 0., 0.858824],
[1., 0., 0.866667],
[1., 0., 0.87451],
[1., 0., 0.882353],
[1., 0., 0.890196],
[1., 0., 0.898039],
[1., 0., 0.905882],
[1., 0., 0.913725],
[1., 0., 0.921569],
[1., 0., 0.929412],
[1., 0., 0.937255],
[1., 0., 0.945098],
[1., 0., 0.94902],
[1., 0., 0.956863],
[1., 0., 0.964706],
[1., 0., 0.972549],
[1., 0., 0.980392],
[1., 0., 0.988235],
[1., 0., 0.996078],
[0.992157, 0., 1.],
[0.984314, 0., 1.],
[0.976471, 0., 1.],
[0.968627, 0., 1.],
[0.960784, 0., 1.],
[0.952941, 0., 1.],
[0.945098, 0., 1.],
[0.937255, 0., 1.],
[0.929412, 0., 1.],
[0.921569, 0., 1.],
[0.913725, 0., 1.],
[0.905882, 0., 1.],
[0.898039, 0., 1.],
[0.890196, 0., 1.],
[0.882353, 0., 1.],
[0.87451, 0., 1.],
[0.866667, 0., 1.],
[0.858824, 0., 1.],
[0.85098, 0., 1.],
[0.847059, 0., 1.],
[0.839216, 0., 1.],
[0.831373, 0., 1.],
[0.823529, 0., 1.],
[0.815686, 0., 1.],
[0.807843, 0., 1.],
[0.8, 0., 1.],
[0.792157, 0., 1.],
[0.784314, 0., 1.],
[0.776471, 0., 1.],
[0.768627, 0., 1.],
[0.760784, 0., 1.],
[0.752941, 0., 1.],
[0.745098, 0., 1.],
[0.737255, 0., 1.],
[0.729412, 0., 1.],
[0., 0.54902, 1.],
[0., 0.572549, 1.],
[0., 0.596078, 1.],
[0., 0.615686, 1.],
[0., 0.639216, 1.],
[0., 0.662745, 1.],
[0., 0.682353, 1.],
[0., 0.705882, 1.],
[0., 0.729412, 1.],
[0., 0.752941, 1.],
[0., 0.772549, 1.],
[0., 0.796078, 1.],
[0., 0.819608, 1.],
[0., 0.839216, 1.],
[0., 0.862745, 1.],
[0., 0.886275, 1.],
[0., 0.909804, 1.],
[0., 0.929412, 1.],
[0., 0.952941, 1.],
[0., 0.976471, 1.],
[0., 1., 1.],
[0., 1., 0.976471],
[0., 1., 0.952941],
[0., 1., 0.929412],
[0., 1., 0.909804],
[0., 1., 0.886275],
[0., 1., 0.862745],
[0., 1., 0.839216],
[0., 1., 0.819608],
[0., 1., 0.796078],
[0., 1., 0.772549],
[0., 1., 0.752941],
[0., 1., 0.729412],
[0., 1., 0.705882],
[0., 1., 0.682353],
[0., 1., 0.662745],
[0., 1., 0.639216],
[0., 1., 0.615686],
[0., 1., 0.596078],
[0., 1., 0.572549],
[0., 1., 0.54902],
[0., 1., 0.52549],
[0., 1., 0.505882],
[0., 1., 0.482353],
[0., 1., 0.458824],
[0., 1., 0.439216],
[0., 1., 0.415686],
[0., 1., 0.392157],
[0., 1., 0.368627],
[0., 1., 0.34902],
[0., 1., 0.32549],
[0., 1., 0.301961],
[0., 1., 0.278431],
[0., 1., 0.258824],
[0., 1., 0.235294],
[0., 1., 0.211765],
[0., 1., 0.192157],
[0., 1., 0.168627],
[0., 1., 0.145098],
[0., 1., 0.121569],
[0., 1., 0.101961],
[0., 1., 0.0784314],
[0., 1., 0.054902],
[0., 1., 0.0352941],
[0., 1., 0.0117647],
[0.00784314, 1., 0.],
[0.0313725, 1., 0.],
[0.0509804, 1., 0.],
[0.0745098, 1., 0.],
[0.0980392, 1., 0.],
[0.117647, 1., 0.],
[0.141176, 1., 0.],
[0.164706, 1., 0.],
[0.188235, 1., 0.],
[0.207843, 1., 0.],
[0.231373, 1., 0.],
[0.254902, 1., 0.],
[0.278431, 1., 0.],
[0.298039, 1., 0.],
[0.321569, 1., 0.],
[0.345098, 1., 0.],
[0.364706, 1., 0.],
[0.388235, 1., 0.],
[0.411765, 1., 0.],
[0.435294, 1., 0.],
[0.454902, 1., 0.],
[0.478431, 1., 0.],
[0.501961, 1., 0.],
[0.521569, 1., 0.],
[0.545098, 1., 0.],
[0.568627, 1., 0.],
[0.592157, 1., 0.],
[0.611765, 1., 0.],
[0.635294, 1., 0.],
[0.658824, 1., 0.],
[0.678431, 1., 0.],
[0.701961, 1., 0.],
[0.72549, 1., 0.],
[0.74902, 1., 0.],
[0.768627, 1., 0.],
[0.792157, 1., 0.],
[0.815686, 1., 0.],
[0.839216, 1., 0.],
[0.858824, 1., 0.],
[0.882353, 1., 0.],
[0.905882, 1., 0.],
[0.92549, 1., 0.],
[0.94902, 1., 0.],
[0.972549, 1., 0.],
[0.996078, 1., 0.],
[1., 0.980392, 0.],
[1., 0.956863, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.890196, 0.],
[1., 0.866667, 0.],
[1., 0.843137, 0.],
[1., 0.823529, 0.],
[1., 0.8, 0.],
[1., 0.776471, 0.],
[1., 0.756863, 0.],
[1., 0.733333, 0.],
[1., 0.709804, 0.],
[1., 0.686275, 0.],
[1., 0.666667, 0.],
[1., 0.666667, 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/backend_wx.py | 4 | 65344 | """
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue (jeremy@o-donoghue.com)
Derived from original copyright work by John Hunter
(jdhunter@ace.bsd.uchicago.edu)
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
import sys
import os
import os.path
import math
import weakref
import warnings
import numpy as np
import matplotlib
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
cursors, TimerBase)
from matplotlib.backend_bases import ShowBase
from matplotlib.backend_bases import _has_pil
from matplotlib._pylab_helpers import Gcf
from matplotlib.cbook import (is_string_like, is_writable_file_like,
warn_deprecated)
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
from . import wx_compat as wxc
import wx
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback
import pdb
_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls))
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print()
pdb.pm() # jdh uncomment
class fake_stderr(object):
"""
Wx does strange things with stderr, as it makes the assumption that
there is probably no console. This redirects stderr to the console, since
we know that there is one!
"""
def write(self, msg):
print("Stderr: %s\n\r" % msg)
#if _DEBUG < 5:
#sys.excepthook = debug_on_error
#WxLogger =wx.LogStderr()
#sys.stderr = fake_stderr
# the true dots per inch on the screen; should be display dependent
# see
# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
# for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog = wx.MessageDialog(parent=parent,
message=msg,
caption='Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class TimerWx(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timer event to our handler.
# For WX, the events have to use a widget for binding.
self.parent = parent
self._timer = wx.Timer(self.parent, wx.NewId())
self.parent.Bind(wx.EVT_TIMER, self._on_timer, self._timer)
# Unbinding causes Wx to stop for some reason. Disabling for now.
# def __del__(self):
# TimerBase.__del__(self)
# self.parent.Bind(wx.EVT_TIMER, None, self._timer)
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
self._timer_start()
def _timer_set_single_shot(self):
self._timer.Start()
def _on_timer(self, *args):
TimerBase._on_timer(self)
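# Usage sketch (illustrative; not exercised by the backend itself): a TimerWx
# is normally obtained through FigureCanvasWx.new_timer() rather than built
# directly, e.g.
#
#     timer = canvas.new_timer(interval=500)   # canvas: an existing FigureCanvasWx
#     timer.add_callback(on_tick)              # on_tick: an assumed user callback
#     timer.start()
#
# add_callback()/start() are inherited from matplotlib.backend_bases.TimerBase.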
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
# In wxPython, drawing is performed on a wxDC instance, which will
    # generally be mapped to the client area of the window displaying
# the plot. Under wxPython, the wxDC instance has a wx.Pen which
# describes the colour and weight of any lines drawn, and a wxBrush
# which describes the fill colour of any closed polygon.
fontweights = wxc.fontweights
fontangles = wxc.fontangles
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Would it be wise to agree on standard font names across all backends?
fontnames = wxc.fontnames
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
warn_deprecated('2.0', message="The WX backend is "
"deprecated. It's untested "
"and will be removed in Matplotlib 2.2. "
"Use the WXAgg backend instead. "
"See Matplotlib usage FAQ for more info on backends.",
alternative='WXAgg')
RendererBase.__init__(self)
DEBUG_MSG("__init__()", 1, self)
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
# return 1, 1
if ismath:
s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0],
self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
@staticmethod
def convert_path(gfx_ctx, path, transform):
wxpath = gfx_ctx.CreatePath()
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
wxpath = self.convert_path(gfx_ctx, path, transform)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.width
h = self.height
rows, cols = im.shape[:2]
bitmap = wxc.BitmapFromBuffer(cols, rows, im.tostring())
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
int(w), int(-h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y - h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
        Return an instance of a GraphicsContextWx and cache it as the current gc.
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size + 0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
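# Worked example (illustrative): with the nominal PIXELS_PER_INCH of 75 and a
# renderer dpi of 72, points_to_pixels(12) returns 12 * (75/72 * 72/72) = 12.5.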
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
    The base GraphicsContext stores colors as an RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = {'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND}
_joind = {'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND}
_dashd_wx = wxc.dashd_wx
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
        Select a null bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGBA=None):
"""
        Set the foreground color. fg can be a MATLAB-style format string, an
        HTML hex color string, an RGB unit tuple, or a float between 0
        and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
        Set the foreground color to the grayscale level frac, a float
        between 0 and 1.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
w = float(w)
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w > 0 and w < 1:
w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw == 0:
lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
        Set the line style to one of the keys in GraphicsContextWx._dashd_wx.
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
self._style = wx.LONG_DASH # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(
red=int(r),
green=int(g),
blue=int(b),
alpha=int(a))
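# Worked example (illustrative): get_wxcolour converts matplotlib's
# unit-interval RGB(A) tuples into wx.Colour instances, e.g.
#
#     gc.get_wxcolour((1.0, 0.5, 0.0))          -> wx.Colour(255, 127, 0)
#     gc.get_wxcolour((0.0, 0.0, 0.0, 0.5))     -> wx.Colour(0, 0, 0, alpha=127)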
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l, b, w, h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn(
"could not find a setinitialsize function for backend_wx; "
"please report your wxpython version=%s "
"to the matplotlib developers list" %
wxc.backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize',
do_nothing)
if not hasattr(self, 'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible',
lambda *args: True)
# Create the drawing bitmap
self.bitmap = wxc.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
self.Bind(wx.EVT_SIZE, self._onSize)
self.Bind(wx.EVT_PAINT, self._onPaint)
self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
self.Bind(wx.EVT_MOTION, self._onMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
self.Bind(wx.EVT_IDLE, self._onIdle)
# Add middle button events
self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self._onCaptureLost)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self._onCaptureLost)
if wx.VERSION_STRING < "2.9":
# only needed in 2.8 to reduce flicker
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
else:
# this does the same in 2.9+
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
self.macros = {} # dict from wx id to seq of macros
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def draw(self, drawDC=None):
"""
        Render the figure to the backing bitmap using a RendererWx instance,
        then repaint the GUI canvas, optionally using the supplied drawDC.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only
for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerWx(self, *args, **kwargs)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout * 1000, oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wxc.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None, origin='WX'):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied wx.PaintDC device context.
The 'WXAgg' backend sets origin accordingly.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if not drawDC:
                # not called from OnPaint; use a ClientDC
drawDC = wx.ClientDC(self)
# following is for 'WX' backend on Windows
# the bitmap can not be in use by another DC,
# see GraphicsContextWx._cache
if wx.Platform == '__WXMSW__' and origin == 'WX':
img = self.bitmap.ConvertToImage()
bmp = img.ConvertToBitmap()
drawDC.DrawBitmap(bmp, 0, 0)
else:
drawDC.DrawBitmap(self.bitmap, 0, 0)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artists' contains() methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
if not _has_pil:
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
*args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
if not _has_pil:
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF,
*args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l, b, width, height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wxc.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# image is the object that we call SaveFile on.
image = self.bitmap
# set the JPEG quality appropriately. Unfortunately, it is only
# possible to set the quality on a wx.Image object. So if we
# are saving a JPEG, convert the wx.Bitmap to a wx.Image,
# and set the quality.
if filetype == wx.BITMAP_TYPE_JPEG:
jpeg_quality = kwargs.get('quality',
rcParams['savefig.jpeg_quality'])
image = self.bitmap.ConvertToImage()
image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not image.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
elif is_writable_file_like(filename):
if not isinstance(image, wx.Image):
image = image.ConvertToImage()
if not image.SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap = wxc.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
FigureCanvasBase.resize_event(self)
def _get_key(self, evt):
keyval = evt.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
            # wx always returns an uppercase character, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not evt.ShiftDown():
key = key.lower()
else:
key = None
for meth, prefix in (
[evt.AltDown, 'alt'],
[evt.ControlDown, 'ctrl'], ):
if meth():
key = '{0}+{1}'.format(prefix, key)
return key
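    # Example (illustrative): pressing 'a' with Control held yields 'ctrl+a',
    # with Shift also held it yields 'ctrl+A', and a function key maps to
    # e.g. 'f5' via the keyvald table above.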
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
# print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _set_capture(self, capture=True):
"""control wx mouse capture """
if self.HasCapture():
self.ReleaseMouse()
if capture:
self.CaptureMouse()
def _onCaptureLost(self, evt):
"""Capture changed or lost"""
self._set_capture(False)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3,
dblclick=True, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1,
dblclick=True, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
# Add middle button events
def _onMiddleButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
def _onMiddleButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2,
dblclick=True, guiEvent=evt)
def _onMiddleButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
# print "delta,rotation,rate",delta,rotation,rate
step = rate * float(rotation) / delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
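    # Example (illustrative): with the common wheel delta of 120, one notch of
    # rotation (120) and 3 lines-per-action gives step = 3 * 120 / 120 = 3.0.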
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent=evt)
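# Embedding sketch (assumed standalone usage; this mirrors the usual wx
# embedding recipe rather than anything specific to this module):
#
#     app = wx.App(False)
#     frame = wx.Frame(None, -1, "embedded figure")
#     fig = Figure(figsize=(4, 3))
#     ax = fig.add_subplot(111)
#     ax.plot([0, 1, 2], [0, 1, 4])
#     canvas = FigureCanvasWx(frame, -1, fig)
#     frame.Show()
#     app.MainLoop()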
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
    Creates a wx.App instance if one has not been created so far.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
    This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
needmain = not wx.App.IsMainLoopRunning()
if needmain:
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
fig = figure
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos = wx.Point(20, 20)
l, b, w, h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.canvas.SetFocus()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
if wxc.is_phoenix:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.canvas.SetMinSize((2, 2))
# give the window a matplotlib icon rather than the stock one.
# This is not currently working on Linux and is untested elsewhere.
# icon_path = os.path.join(matplotlib.rcParams['datapath'],
# 'images', 'matplotlib.png')
#icon = wx.IconFromBitmap(wx.Bitmap(icon_path))
# for xpm type icons try:
#icon = wx.Icon(icon_path, wx.BITMAP_TYPE_XPM)
# self.SetIcon(icon)
self.figmgr = FigureManagerWx(self.canvas, num, self)
self.Bind(wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.close_event()
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
# self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
try:
self.canvas.mpl_disconnect(self.toolbar._idDrag)
# Rationale for line above: see issue 2941338.
except AttributeError:
pass # classic toolbar lacks the attribute
if not self.IsBeingDeleted():
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb is not None:
self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def show(self):
self.frame.Show()
self.canvas.draw()
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
def get_window_title(self):
return self.window.GetTitle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU = wx.NewId()
_NTB_AXISMENU_BUTTON = wx.NewId()
_NTB_X_PAN_LEFT = wx.NewId()
_NTB_X_PAN_RIGHT = wx.NewId()
_NTB_X_ZOOMIN = wx.NewId()
_NTB_X_ZOOMOUT = wx.NewId()
_NTB_Y_PAN_UP = wx.NewId()
_NTB_Y_PAN_DOWN = wx.NewId()
_NTB_Y_ZOOMIN = wx.NewId()
_NTB_Y_ZOOMOUT = wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE = wx.NewId()
_NTB_CLOSE = wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'], 'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
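# Example (illustrative): the toolbar below loads its icons through this
# helper, e.g. _load_bitmap('home.png') returns a wx.Bitmap for the stock
# "home" icon shipped in matplotlib's mpl-data/images directory.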
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu = wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId = wx.NewId()
self._invertId = wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected",
False)
self._menu.AppendSeparator()
self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
id=self._invertId)
def Destroy(self):
self._menu.Destroy()
        wx.Button.Destroy(self)  # destroy the button itself; self.Destroy() would recurse into this override
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
if wxc.is_phoenix:
x, y = self.GetPosition()
w, h = self.GetSize()
else:
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y + h - 4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId = wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i,
"Select axis %d" % i,
True)
self._menu.Check(menuId, True)
self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
elif maxAxis < len(self._axisId):
for menuId in self._axisId[maxAxis:]:
self._menu.Delete(menuId)
self._axisId = self._axisId[:maxAxis]
self._toolbar.set_active(list(xrange(maxAxis)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e + 1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
self.prevZoomRect = None
# for now, use alternate zoom-rectangle drawing on all
# Macs. N.B. In future versions of wx it may be possible to
# detect Retina displays with window.GetContentScaleFactor()
# and/or dc.GetContentScaleFactor()
self.retinaFix = 'wxMac' in wx.PlatformInfo
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = wx.NewId()
wxc._AddTool(self, self.wx_ids, text,
_load_bitmap(image_file + '.png'),
tooltip_text)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self.Realize()
def zoom(self, *args):
self.ToggleTool(self.wx_ids['Pan'], False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self.wx_ids['Zoom'], False)
NavigationToolbar2.pan(self, *args)
def configure_subplots(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save_figure(self, *args):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = self.canvas.get_default_filename()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG(
'Save file dir:%s name:%s' %
(dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
# looks like they forgot to set the image type drop
# down, going with the extension.
warnings.warn(
'extension %s did not match the selected '
'image type %s; going with %s' %
(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception as e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor = wxc.Cursor(cursord[cursor])
self.canvas.SetCursor(cursor)
def release(self, event):
try:
del self.lastrect
except AttributeError:
pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
if not self.retinaFix:
self.wxoverlay = wx.Overlay()
else:
self.savedRetinaImage = self.canvas.copy_from_bbox(
self.canvas.figure.gca().bbox)
self.zoomStartX = event.xdata
self.zoomStartY = event.ydata
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
if not self.retinaFix:
self.wxoverlay.Reset()
del self.wxoverlay
else:
del self.savedRetinaImage
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.prevZoomRect = None
def draw_rubberband(self, event, x0, y0, x1, y1):
if self.retinaFix: # On Macs, use the following code
# wx.DCOverlay does not work properly on Retina displays.
rubberBandColor = '#C0C0FF'
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.canvas.restore_region(self.savedRetinaImage)
X0, X1 = self.zoomStartX, event.xdata
Y0, Y1 = self.zoomStartY, event.ydata
lineX = (X0, X0, X1, X1, X0)
lineY = (Y0, Y1, Y1, Y0, Y0)
self.prevZoomRect = self.canvas.figure.gca().plot(
lineX, lineY, '-', color=rubberBandColor)
self.canvas.figure.gca().draw_artist(self.prevZoomRect[0])
self.canvas.blit(self.canvas.figure.gca().bbox)
return
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
        if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wxc.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
if wxc.is_phoenix:
dc.DrawRectangle(rect)
else:
dc.DrawRectangleRect(rect)
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None:
self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self.wx_ids['Back'], can_backward)
self.EnableTool(self.wx_ids['Forward'], can_forward)
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
# self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
# def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""
Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
wx.Printout.__init__(self, title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports a 1-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw, pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw, pgh) = self.GetPageSizePixels() # page size in pixels
(dcw, dch) = dc.GetSize()
if wxc.is_phoenix:
(grw, grh) = self.canvas.GetSize()
else:
(grw, grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(
int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(
int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
        # get margin in pixels = (margin in inches) * (pixels/inch)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
# (assuming grw is size of graph in inches....)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
        # this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
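# Printing sketch (assumed usage, following the standard wxPython printing
# framework): a PrintoutWx instance is handed to a wx.Printer, e.g.
#
#     printout = PrintoutWx(canvas, width=5.5, margin=0.5)
#     printer = wx.Printer()
#     printer.Print(canvas, printout, prompt=True)
#     printout.Destroy()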
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasWx
FigureManager = FigureManagerWx
Toolbar = NavigationToolbar2Wx
| bsd-3-clause |
aflaxman/mpld3 | mpld3/plugins.py | 7 | 25402 | """
Plugins to add behavior to mpld3 charts
=======================================
Plugins are means of adding additional javascript features to D3-rendered
matplotlib plots. A number of plugins are defined here; it is also possible
to create nearly any imaginable behavior by defining your own custom plugin.
"""
__all__ = ['connect', 'clear', 'get_plugins', 'PluginBase',
'Reset', 'Zoom', 'BoxZoom',
'PointLabelTooltip', 'PointHTMLTooltip', 'LineLabelTooltip',
'MousePosition']
import collections
import json
import uuid
import matplotlib
from .utils import get_id
def get_plugins(fig):
"""Get the list of plugins in the figure"""
connect(fig)
return fig.mpld3_plugins
def connect(fig, *plugins):
"""Connect one or more plugins to a figure
Parameters
----------
fig : matplotlib Figure instance
The figure to which the plugins will be connected
*plugins :
Additional arguments should be plugins which will be connected
to the figure.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), '-k')
>>> plugins.connect(fig, plugins.LineLabelTooltip(lines[0]))
"""
if not isinstance(fig, matplotlib.figure.Figure):
raise ValueError("plugins.connect: first argument must be a figure")
if not hasattr(fig, 'mpld3_plugins'):
fig.mpld3_plugins = DEFAULT_PLUGINS[:]
for plugin in plugins:
fig.mpld3_plugins.append(plugin)
def clear(fig):
"""Clear all plugins from the figure, including defaults"""
fig.mpld3_plugins = []
class PluginBase(object):
def get_dict(self):
return self.dict_
def javascript(self):
if hasattr(self, "JAVASCRIPT"):
if hasattr(self, "js_args_"):
return self.JAVASCRIPT.render(self.js_args_)
else:
return self.JAVASCRIPT
else:
return ""
def css(self):
if hasattr(self, "css_"):
return self.css_
else:
return ""
class Reset(PluginBase):
"""A Plugin to add a reset button"""
dict_ = {"type": "reset"}
class MousePosition(PluginBase):
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MousePosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt=".3g"):
self.dict_ = {"type": "mouseposition",
"fontsize": fontsize,
"fmt": fmt}
class Zoom(PluginBase):
"""A Plugin to add zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class BoxZoom(PluginBase):
"""A Plugin to add box-zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "boxzoom",
"button": button,
"enabled": enabled}
class PointLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : array or None
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, PointLabelTooltip(points[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LineLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
If supplied, the label for the line. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), 'o')
>>> plugins.connect(fig, LineLabelTooltip(lines[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, label=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
self.dict_ = {"type": "tooltip",
"id": get_id(points),
"labels": label if label is None else [label],
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LinkedBrush(PluginBase):
"""A Plugin to enable linked brushing between plots
Parameters
----------
points : matplotlib Collection or Line2D object
A representative of the scatter plot elements to brush.
button : boolean, optional
if True (default), then add a button to enable/disable brushing
enabled : boolean, optional
specify whether brushing should be enabled by default. default=True.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> X = np.random.random((3, 100))
>>> fig, ax = plt.subplots(3, 3)
>>> for i in range(2):
... for j in range(2):
... points = ax[i, j].scatter(X[i], X[j])
>>> plugins.connect(fig, LinkedBrush(points))
>>> fig_to_html(fig)
Notes
-----
Notice that in the above example, only one of the four sets of points is
passed to the plugin. This is all that is needed: for the sake of efficient
data storage, mpld3 keeps track of which plot objects draw from the same
data.
Also note that for the linked brushing to work correctly, the data must
not contain any NaNs. The presence of NaNs makes the different data views
have different sizes, so that mpld3 is unable to link the related points.
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrush",
"button": button,
"enabled": enabled,
"id": get_id(points, suffix)}
class PointHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formatted text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, PointHTMLTooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "htmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
class LineHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formatted text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
The label for the line, as a string of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10))
>>> label = '<h1>line {title}</h1>'.format(title='A')
>>> plugins.connect(fig, LineHTMLTooltip(lines[0], label))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("linehtmltooltip", LineHTMLTooltip);
LineHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
LineHTMLTooltip.prototype.constructor = LineHTMLTooltip;
LineHTMLTooltip.prototype.requiredProps = ["id"];
LineHTMLTooltip.prototype.defaultProps = {label:null,
hoffset:0,
voffset:10};
function LineHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
var label = this.props.label
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(label)
.style("visibility", "visible");
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");})
};
"""
def __init__(self, line, label=None,
hoffset=0, voffset=10,
css=None):
self.line = line
self.label = label
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
self.dict_ = {"type": "linehtmltooltip",
"id": get_id(line),
"label": label,
"hoffset": hoffset,
"voffset": voffset}
class InteractiveLegendPlugin(PluginBase):
"""A plugin for an interactive legends.
Inspired by http://bl.ocks.org/simzou/6439398
Parameters
----------
plot_elements : iterable of matplotlib elements
the elements to associate with each legend item
labels : iterable of strings
The labels for each legend element
ax : matplotlib axes instance, optional
the ax to which the legend belongs. Default is the first
axes. The legend will be plotted to the right of the specified
axes
alpha_unsel : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is unselected.
Default is 0.2
alpha_over : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is overlaid.
Default is 1 (no effect); 1.5 works nicely!
start_visible : boolean, optional (could be a list of booleans)
defines whether objects should start selected or not.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> N_paths = 5
>>> N_steps = 100
>>> x = np.linspace(0, 10, 100)
>>> y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
>>> y = y.cumsum(1)
>>> fig, ax = plt.subplots()
>>> labels = ["a", "b", "c", "d", "e"]
>>> line_collections = ax.plot(x, y.T, lw=4, alpha=0.6)
>>> interactive_legend = plugins.InteractiveLegendPlugin(line_collections,
... labels,
... alpha_unsel=0.2,
... alpha_over=1.5,
... start_visible=True)
>>> plugins.connect(fig, interactive_legend)
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_unsel":0.2,
"alpha_over":1.0,
"start_visible":true}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_unsel = this.props.alpha_unsel;
var alpha_over = this.props.alpha_over;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
// mpld3_element might be null in case of Line2D instances
// for we pass the id for both the line and the markers. Either
// one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
obj.visible = this.props.start_visible[i]; // set from the start_visible option on the Python side
legendItems.push(obj);
set_alphas(obj, false);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height", 10)
.attr("width", 25)
.attr("x", ax.width + ax.position[0] + 25)
.attr("y",function(d,i) {
return ax.position[1] + i * 25 + 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click).on('mouseover', over).on('mouseout', out);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width + ax.position[0] + 25 + 40;})
.attr("y", function(d,i) {
return ax.position[1] + i * 25 + 10 + 10 - 1;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
set_alphas(d, false);
};
// specify the action on legend overlay
function over(d,i){
set_alphas(d, true);
};
// specify the action on legend overlay
function out(d,i){
set_alphas(d, false);
};
// helper function for setting alphas
function set_alphas(d, is_over){
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
var current_alpha = d.mpld3_elements[i].props.alpha;
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("stroke-width", is_over ?
alpha_over * d.mpld3_elements[i].props.edgewidth : d.mpld3_elements[i].props.edgewidth);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
var current_alpha = d.mpld3_elements[i].props.alphas[0];
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("fill-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel));
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
css_ = """
.legend-box {
cursor: pointer;
}
"""
def __init__(self, plot_elements, labels, ax=None,
alpha_unsel=0.2, alpha_over=1., start_visible=True):
self.ax = ax
if ax:
ax = get_id(ax)
# start_visible could be a list
if isinstance(start_visible, bool):
start_visible = [start_visible] * len(labels)
elif not len(start_visible) == len(labels):
raise ValueError("{} out of {} visible params has been set"
.format(len(start_visible), len(labels)))
mpld3_element_ids = self._determine_mpld3ids(plot_elements)
self.mpld3_element_ids = mpld3_element_ids
self.dict_ = {"type": "interactive_legend",
"element_ids": mpld3_element_ids,
"labels": labels,
"ax": ax,
"alpha_unsel": alpha_unsel,
"alpha_over": alpha_over,
"start_visible": start_visible}
def _determine_mpld3ids(self, plot_elements):
"""
Helper function to get the mpld3_id for each
of the specified elements.
"""
mpld3_element_ids = []
# There are two things being done here. First,
# we make sure that we have a list of lists, where
# each inner list is associated with a single legend
# item. Second, in case of Line2D object we pass
# the id for both the marker and the line.
# on the javascript side we filter out the nulls in
# case either the line or the marker has no equivalent
# D3 representation.
for entry in plot_elements:
ids = []
if isinstance(entry, collections.Iterable):
for element in entry:
mpld3_id = get_id(element)
ids.append(mpld3_id)
if isinstance(element, matplotlib.lines.Line2D):
mpld3_id = get_id(element, 'pts')
ids.append(mpld3_id)
else:
ids.append(get_id(entry))
if isinstance(entry, matplotlib.lines.Line2D):
mpld3_id = get_id(entry, 'pts')
ids.append(mpld3_id)
mpld3_element_ids.append(ids)
return mpld3_element_ids
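# For reference, a hedged sketch of the structure returned above when two
# Line2D handles (l1, l2 -- illustrative names) are passed in: one inner list
# per legend item, pairing the line id with the id of its markers, i.e.
# [[get_id(l1), get_id(l1, 'pts')], [get_id(l2), get_id(l2, 'pts')]].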
DEFAULT_PLUGINS = [Reset(), Zoom(), BoxZoom()]
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 17 | 18878 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except ImportError:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
# as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
# display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healthy', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet' , 'hatch': '+'}
else:
props[key] = {'color': 'Crimson' , 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variable in a dataset. Could be easily converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healthy', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m',)] = (0.0, 0.0, 0.5, 1.0)
res[('f',)] = (0.5, 0.0, 0.5, 1.0)
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m', 'y')] = (0.0, 0.0, 0.5, 1 / 3)
res[('m', 'a')] = (0.0, 1 / 3, 0.5, 1 / 3)
res[('m', 'o')] = (0.0, 2 / 3, 0.5, 1 / 3)
res[('f', 'y')] = (0.5, 0.0, 0.5, 1 / 3)
res[('f', 'a')] = (0.5, 1 / 3, 0.5, 1 / 3)
res[('f', 'o')] = (0.5, 2 / 3, 0.5, 1 / 3)
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non empty tuple and uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extremes should give the whole set, as well
# as if 0 is inserted
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the perfect square
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# disequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/examples/analysis_retinotopicmapping/batch_MarkPatches.py | 1 | 1417 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 30 14:46:38 2014
@author: junz
"""
import os
import matplotlib.pyplot as plt
import corticalmapping.core.FileTools as ft
import corticalmapping.RetinotopicMapping as rm
trialName = '160208_M193206_Trial1.pkl'
names = [
['patch01', 'V1'],
['patch02', 'RL'],
['patch03', 'LM'],
['patch04', 'AL'],
['patch05', 'AM'],
['patch06', 'PM'],
['patch07', 'MMA'],
['patch08', 'MMP'],
['patch09', 'LLA'],
# ['patch10', 'AM'],
# ['patch11', 'LLA'],
# ['patch12', 'MMP'],
# ['patch13', 'MMP']
# ['patch14', 'MMP']
]
currFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(currFolder)
trialPath = os.path.join(currFolder,trialName)
trialDict = ft.loadFile(trialPath)
finalPatches = dict(trialDict['finalPatches'])
for i, namePair in enumerate(names):
currPatch = finalPatches.pop(namePair[0])
newPatchDict = {namePair[1]:currPatch}
finalPatches.update(newPatchDict)
trialDict.update({'finalPatchesMarked':finalPatches})
ft.saveFile(trialPath,trialDict)
trial, _ = rm.loadTrial(trialPath)
f = plt.figure(figsize=(10,10))
ax = f.add_subplot(111)
trial.plotFinalPatchBorders2(plotAxis = ax,borderWidth=2)
plt.show()
f.savefig(trialName[0:-4]+'_borders.pdf',dpi=600)
f.savefig(trialName[0:-4]+'_borders.png',dpi=300) | gpl-3.0 |
ghwatson/SpanishAcquisitionIQC | spacq/gui/display/plot/surface.py | 2 | 2280 | from matplotlib import pyplot
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from mpl_toolkits.mplot3d import axes3d
import numpy
import wx
"""
An embeddable three-dimensional surface plot.
"""
class SurfacePlot(object):
"""
A surface plot.
"""
alpha = 0.8
def __init__(self, parent, style='surface'):
self.style = style
self.figure = pyplot.figure()
self.canvas = FigureCanvas(parent, wx.ID_ANY, self.figure)
self.axes = axes3d.Axes3D(self.figure)
self.surface = None
def __del__(self):
try:
self.close()
except Exception:
pass
@property
def control(self):
"""
A drawable control.
"""
return self.canvas
def close(self):
"""
Inform pyplot that this figure is no longer required.
"""
pyplot.close(self.figure.number)
def set_surface_data(self, data):
"""
Set the surface data based on the data tuple.
"""
if self.surface is not None:
self.axes.collections.remove(self.surface)
self.surface = None
if data is None:
return
surface_data, x_bounds, y_bounds = data
# Number of values along each axis.
y_num, x_num = surface_data.shape
# The equally-spaced values along each axis.
x_values = numpy.linspace(*x_bounds, num=x_num)
y_values = numpy.linspace(*y_bounds, num=y_num)
# The meshgrid of values.
x, y = numpy.meshgrid(x_values, y_values)
if self.style == 'surface':
# Just a regular surface.
self.surface = self.axes.plot_surface(x, y, surface_data, alpha=self.alpha)
elif self.style == 'waveform':
# Waveform style shows individual waveforms nicely.
self.surface = self.axes.plot_wireframe(x, y, surface_data, cstride=100000)
surface_data = property(fset=set_surface_data)
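# A hedged usage sketch of the data tuple expected by the setter above: a 2D
# array of z values plus (min, max) bounds for x and y. "plot" stands for an
# already-constructed SurfacePlot (it needs a wx parent window):
#
# >>> import numpy
# >>> z = numpy.random.rand(20, 30) # 20 samples along y, 30 along x
# >>> plot.surface_data = (z, (0.0, 1.0), (-5.0, 5.0)) # (values, x_bounds, y_bounds)
# >>> plot.redraw()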
@property
def x_label(self):
"""
The x axis label.
"""
return self.axes.get_xlabel()
@x_label.setter
def x_label(self, value):
self.axes.set_xlabel(value)
@property
def y_label(self):
"""
The y axis label.
"""
return self.axes.get_ylabel()
@y_label.setter
def y_label(self, value):
self.axes.set_ylabel(value)
@property
def z_label(self):
"""
The z axis label.
"""
return self.axes.get_zlabel()
@z_label.setter
def z_label(self, value):
self.axes.set_zlabel(value)
def redraw(self):
self.canvas.draw()
| bsd-2-clause |
pratapvardhan/pandas | pandas/tests/frame/test_convert_to.py | 3 | 12494 | # -*- coding: utf-8 -*-
from datetime import datetime
import pytest
import pytz
import collections
from collections import OrderedDict, defaultdict
import numpy as np
from pandas import compat
from pandas.compat import long
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(TestData):
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
# convert_datetime64 defaults to None
expected = df.index.values[0]
result = df.to_records()['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=False is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index.values[0]
result = df.to_records(convert_datetime64=False)['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=True is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index[0]
result = df.to_records(convert_datetime64=True)['index'][0]
assert expected == result
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <user@example.com>\n'
'To: <someone_else@example.com>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['=i8', '=f8']}
)
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
collections.OrderedDict])
def test_to_dict(self, mapping):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
# GH16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp", mapping)
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r", mapping)
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert (len(recons_data) == 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
df = DataFrame(test_data)
df['duped'] = df[df.columns[0]]
recons_data = df.to_dict("i")
comp_data = test_data.copy()
comp_data['duped'] = comp_data[df.columns[0]]
for k, v in compat.iteritems(comp_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
@pytest.mark.parametrize('mapping', [
list,
collections.defaultdict,
[]])
def test_to_dict_errors(self, mapping):
# GH16122
df = DataFrame(np.random.randn(3, 3))
with pytest.raises(TypeError):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
with tm.assert_produces_warning(UserWarning):
df.to_dict()
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(self, tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
def test_to_dict_box_scalars(self):
# 14216
# make sure that we are boxing properly
d = {'a': [1], 'b': ['b']}
result = DataFrame(d).to_dict()
assert isinstance(list(result['a'])[0], (int, long))
assert isinstance(list(result['b'])[0], (int, long))
result = DataFrame(d).to_dict(orient='records')
assert isinstance(result[0]['a'], (int, long))
def test_frame_to_dict_tz(self):
# GH18372 When converting to dict with orient='records' columns of
# datetime that are tz-aware were not converted to required arrays
data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]
df = DataFrame(list(data), columns=["d", ])
result = df.to_dict(orient='records')
expected = [
{'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},
{'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
@pytest.mark.parametrize('into, expected', [
(dict, {0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}),
(OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}),
(1, {'int_col': 2, 'float_col': 2.0}),
(2, {'int_col': 3, 'float_col': 3.0})])),
(defaultdict(list), defaultdict(list,
{0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}))
])
def test_to_dict_index_dtypes(self, into, expected):
# GH 18580
# When using to_dict(orient='index') on a dataframe with int
# and float columns only the int columns were cast to float
df = DataFrame({'int_col': [1, 2, 3],
'float_col': [1.0, 2.0, 3.0]})
result = df.to_dict(orient='index', into=into)
cols = ['int_col', 'float_col']
result = DataFrame.from_dict(result, orient='index')[cols]
expected = DataFrame.from_dict(expected, orient='index')[cols]
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/tree/export.py | 4 | 17978 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Li Li <aiki.nogard@gmail.com>
# License: BSD 3 clause
from numbers import Integral
import numpy as np
from ..externals import six
from ..utils.validation import check_is_fitted
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
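# A quick, hedged sanity check of the helper above:
#
# >>> palette = _color_brew(3)
# >>> len(palette)
# 3
# >>> all(0 <= channel <= 255 for rgb in palette for channel in rgb)
# True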
class Sentinel(object):
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=None, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False, precision=3):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default=None)
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, optional (default=3)
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] -
sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
precision),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], precision)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, precision)
elif proportion:
# Classification
value_text = np.around(value, precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
check_is_fitted(decision_tree, 'tree_')
own_file = False
return_string = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_ in the decision_tree
if feature_names is not None:
if len(feature_names) != decision_tree.n_features_:
raise ValueError("Length of feature_names, %d "
"does not match number of features, %d"
% (len(feature_names),
decision_tree.n_features_))
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
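# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It shows how export_graphviz is typically called -- with out_file=None the
# DOT source is returned as a string (the return_string branch above). The
# dataset, max_depth and random_state below are arbitrary choices made only
# for this example.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier, export_graphviz

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    dot_source = export_graphviz(clf, out_file=None,
                                 feature_names=iris.feature_names,
                                 filled=True, rounded=True)
    print(dot_source.splitlines()[0])  # first line of the DOT text: "digraph Tree {"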
| bsd-3-clause |
yunfeilu/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates that Label Propagation can learn a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
    # Also plot the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
cybernet14/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) making up as much as 29.3% of the
sample in the two-dimensional case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen, it is recommended to use it
only for small problems in terms of the number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
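# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original example. For a
# single feature the classical Theil-Sen estimate is simply the median of all
# pairwise slopes, with the intercept taken as the median residual;
# TheilSenRegressor generalizes this to several features via the spatial
# median over p-point subsamples, as described in the docstring above. The
# helper below is a self-contained reference for the 1-D idea only and is not
# used by the example code that follows.
def theil_sen_1d_sketch(x, y):
    """Return (slope, intercept) from the median of pairwise slopes."""
    import numpy as np  # local import keeps the sketch self-contained
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    i, j = np.triu_indices(len(x), k=1)   # every index pair with i < j
    keep = x[j] != x[i]                   # skip pairs with an undefined slope
    slopes = (y[j][keep] - y[i][keep]) / (x[j][keep] - x[i][keep])
    slope = np.median(slopes)
    intercept = np.median(y - slope * x)
    return slope, intercept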
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
carefree0910/MachineLearning | f_NN/Networks.py | 1 | 12872 | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import matplotlib.pyplot as plt
from f_NN.Layers import *
from f_NN.Optimizers import *
from Util.Bases import ClassifierBase
from Util.ProgressBar import ProgressBar
class NNVerbose:
NONE = 0
EPOCH = 1
METRICS = 2
METRICS_DETAIL = 3
DETAIL = 4
DEBUG = 5
class NaiveNN(ClassifierBase):
NaiveNNTiming = Timing()
def __init__(self, **kwargs):
super(NaiveNN, self).__init__(**kwargs)
self._layers, self._weights, self._bias = [], [], []
self._w_optimizer = self._b_optimizer = None
self._current_dimension = 0
self._params["lr"] = kwargs.get("lr", 0.001)
self._params["epoch"] = kwargs.get("epoch", 10)
self._params["optimizer"] = kwargs.get("optimizer", "Adam")
# Utils
@NaiveNNTiming.timeit(level=4)
def _add_params(self, shape):
self._weights.append(np.random.randn(*shape))
self._bias.append(np.zeros((1, shape[1])))
@NaiveNNTiming.timeit(level=4)
def _add_layer(self, layer, *args):
current, nxt = args
self._add_params((current, nxt))
self._current_dimension = nxt
self._layers.append(layer)
@NaiveNNTiming.timeit(level=1)
def _get_activations(self, x):
activations = [self._layers[0].activate(x, self._weights[0], self._bias[0])]
for i, layer in enumerate(self._layers[1:]):
activations.append(layer.activate(
activations[-1], self._weights[i + 1], self._bias[i + 1]))
return activations
@NaiveNNTiming.timeit(level=1)
def _get_prediction(self, x):
return self._get_activations(x)[-1]
# Optimizing Process
@NaiveNNTiming.timeit(level=4)
def _init_optimizers(self, optimizer, lr, epoch):
opt_fac = OptFactory()
self._w_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._weights, lr, epoch)
self._b_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._bias, lr, epoch)
@NaiveNNTiming.timeit(level=1)
def _opt(self, i, _activation, _delta):
self._weights[i] += self._w_optimizer.run(
i, _activation.T.dot(_delta)
)
self._bias[i] += self._b_optimizer.run(
i, np.sum(_delta, axis=0, keepdims=True)
)
# API
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def add(self, layer):
if not self._layers:
self._layers, self._current_dimension = [layer], layer.shape[1]
self._add_params(layer.shape)
else:
nxt = layer.shape[0]
layer.shape = (self._current_dimension, nxt)
self._add_layer(layer, self._current_dimension, nxt)
@NaiveNNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, optimizer=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
for counter in range(epoch):
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x)
deltas = [self._layers[-1].bp_first(y, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(self._layers[i - 1].bp(
activations[i - 1], self._weights[i], deltas[-1]
))
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x, deltas[-1])
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def predict(self, x, get_raw_results=False, **kwargs):
y_pred = self._get_prediction(np.atleast_2d(x))
if get_raw_results:
return y_pred
return np.argmax(y_pred, axis=1)
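# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module. The
# fit() loop above uses the standard backprop recursion: the cost layer
# produces the delta of the last layer, and each earlier delta follows from
# the next layer's delta and weight matrix. The helper below spells that
# recursion out for a plain sigmoid MLP with a squared-error cost, without
# the Layer/Optimizer abstractions; it is never called by this module. `np`
# is available here through the star imports at the top of the file.
def _backprop_deltas_sketch(x, y, weights, biases):
    """Return the per-layer deltas, ordered from the last layer backwards."""
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))
    # forward pass, keeping every activation (mirrors _get_activations)
    activations = [sigmoid(x.dot(weights[0]) + biases[0])]
    for w, b in zip(weights[1:], biases[1:]):
        activations.append(sigmoid(activations[-1].dot(w) + b))
    # backward pass (mirrors the deltas list built inside fit)
    out = activations[-1]
    deltas = [(out - y) * out * (1 - out)]   # squared-error cost, last layer
    for i in range(len(weights) - 1, 0, -1):
        a = activations[i - 1]
        deltas.append(deltas[-1].dot(weights[i].T) * a * (1 - a))
    return deltas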
class NN(NaiveNN):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NN, self).__init__(**kwargs)
self._available_metrics = {
key: value for key, value in zip(["acc", "f1-score"], [NN.acc, NN.f1_score])
}
self._metrics, self._metric_names, self._logs = [], [], {}
self.verbose = None
self._params["batch_size"] = kwargs.get("batch_size", 256)
self._params["train_rate"] = kwargs.get("train_rate", None)
self._params["metrics"] = kwargs.get("metrics", None)
self._params["record_period"] = kwargs.get("record_period", 100)
self._params["verbose"] = kwargs.get("verbose", 1)
# Utils
@NNTiming.timeit(level=1)
def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):
if verbose is None:
verbose = self.verbose
single_batch = batch_size / np.prod(x.shape[1:]) # type: float
single_batch = int(single_batch)
if not single_batch:
single_batch = 1
if single_batch >= len(x):
return self._get_activations(x).pop()
epoch = int(len(x) / single_batch)
        if len(x) % single_batch:
epoch += 1
name = "Prediction" if name is None else "Prediction ({})".format(name)
sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
if verbose >= NNVerbose.METRICS:
sub_bar.start()
rs, count = [self._get_activations(x[:single_batch]).pop()], single_batch
if verbose >= NNVerbose.METRICS:
sub_bar.update()
while count < len(x):
count += single_batch
if count >= len(x):
rs.append(self._get_activations(x[count - single_batch:]).pop())
else:
rs.append(self._get_activations(x[count - single_batch:count]).pop())
if verbose >= NNVerbose.METRICS:
sub_bar.update()
return np.vstack(rs)
@NNTiming.timeit(level=4, prefix="[API] ")
def _preview(self):
if not self._layers:
rs = "None"
else:
rs = (
"Input : {:<10s} - {}\n".format("Dimension", self._layers[0].shape[0]) +
"\n".join(
["Layer : {:<10s} - {}".format(
_layer.name, _layer.shape[1]
) for _layer in self._layers[:-1]]
) + "\nCost : {:<10s}".format(self._layers[-1].name)
)
print("=" * 30 + "\n" + "Structure\n" + "-" * 30 + "\n" + rs + "\n" + "=" * 30)
print("Optimizer")
print("-" * 30)
print(self._w_optimizer)
print("=" * 30)
@NNTiming.timeit(level=2)
def _append_log(self, x, y, y_classes, name):
y_pred = self._get_prediction(x, name)
y_pred_classes = np.argmax(y_pred, axis=1)
for i, metric in enumerate(self._metrics):
self._logs[name][i].append(metric(y_classes, y_pred_classes))
self._logs[name][-1].append(self._layers[-1].calculate(y, y_pred) / len(y))
@NNTiming.timeit(level=3)
def _print_metric_logs(self, data_type):
print()
print("=" * 47)
for i, name in enumerate(self._metric_names):
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, name, self._logs[data_type][i][-1]))
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, "loss", self._logs[data_type][-1][-1]))
print("=" * 47)
@NNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, batch_size=None, train_rate=None,
optimizer=None, metrics=None, record_period=None, verbose=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
if batch_size is None:
batch_size = self._params["batch_size"]
if train_rate is None:
train_rate = self._params["train_rate"]
if metrics is None:
metrics = self._params["metrics"]
if record_period is None:
record_period = self._params["record_period"]
if verbose is None:
verbose = self._params["verbose"]
self.verbose = verbose
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
self._preview()
if train_rate is not None:
train_rate = float(train_rate)
train_len = int(len(x) * train_rate)
shuffle_suffix = np.random.permutation(len(x))
x, y = x[shuffle_suffix], y[shuffle_suffix]
x_train, y_train = x[:train_len], y[:train_len]
x_test, y_test = x[train_len:], y[train_len:]
else:
x_train = x_test = x
y_train = y_test = y
y_train_classes = np.argmax(y_train, axis=1)
y_test_classes = np.argmax(y_test, axis=1)
train_len = len(x_train)
batch_size = min(batch_size, train_len)
do_random_batch = train_len > batch_size
train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
if metrics is None:
metrics = []
self._metrics = self.get_metrics(metrics)
self._metric_names = [_m.__name__ for _m in metrics]
self._logs = {
name: [[] for _ in range(len(metrics) + 1)] for name in ("train", "test")
}
bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
if self.verbose >= NNVerbose.EPOCH:
bar.start()
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
for counter in range(epoch):
if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
sub_bar.start()
for _ in range(train_repeat):
if do_random_batch:
batch = np.random.choice(train_len, batch_size)
x_batch, y_batch = x_train[batch], y_train[batch]
else:
x_batch, y_batch = x_train, y_train
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x_batch)
deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(
self._layers[i - 1].bp(activations[i - 1], self._weights[i], deltas[-1])
)
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x_batch, deltas[-1])
if self.verbose >= NNVerbose.EPOCH:
if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
sub_bar.update()
if (counter + 1) % record_period == 0:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
if self.verbose >= NNVerbose.METRICS:
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
bar.update(counter // record_period + 1)
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
def draw_logs(self):
metrics_log, loss_log = {}, {}
for key, value in sorted(self._logs.items()):
metrics_log[key], loss_log[key] = value[:-1], value[-1]
for i, name in enumerate(sorted(self._metric_names)):
plt.figure()
plt.title("Metric Type: {}".format(name))
for key, log in sorted(metrics_log.items()):
xs = np.arange(len(log[i])) + 1
plt.plot(xs, log[i], label="Data Type: {}".format(key))
plt.legend(loc=4)
plt.show()
plt.close()
plt.figure()
plt.title("Cost")
for key, loss in sorted(loss_log.items()):
xs = np.arange(len(loss)) + 1
plt.plot(xs, loss, label="Data Type: {}".format(key))
plt.legend()
plt.show()
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_replace.py | 15 | 43479 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(TestData):
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
pytest.raises(TypeError, self.tsframe.replace, nan, inplace=True)
pytest.raises(TypeError, self.tsframe.replace, nan)
# mixed type
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
def test_replace_regex_metachar(self):
metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
pytest.raises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
assert result.values.all()
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
pytest.raises(ValueError, df.replace, to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
# dict to scalar
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self):
# GH 15289
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
assert_frame_equal(df, df.replace({}))
assert_frame_equal(df, df.replace(Series([])))
assert_frame_equal(df, df.replace({'b': {}}))
assert_frame_equal(df, df.replace(Series({'b': {}})))
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/stats/tests/test_moments.py | 3 | 89255 | import nose
import sys
import functools
import warnings
from datetime import datetime
from numpy.random import randn
from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull, concat
from pandas.util.testing import (
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal
)
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3, StringIO
N, K = 100, 10
class Base(tm.TestCase):
_multiprocess_can_split_ = True
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestMoments(Base):
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
def test_centered_axis_validation(self):
# ok
        mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean, Series(np.ones(10)),
                          3, center=True, axis=1)
        # ok ok
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=0)
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=1)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,
                          DataFrame(np.ones((10, 10))), 3, center=True, axis=2)
def test_rolling_sum(self):
self._check_moment_func(mom.rolling_sum, np.sum)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(mom.rolling_count, counter,
has_min_periods=False,
preserve_nan=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean)
def test_cmov_mean(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_mean(vals, 5, center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_mean(Series(vals), 5, center=True)
assert_series_equal(xp, rs)
def test_cmov_window(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
# GH 8238
tm._skip_if_no_scipy()
# all nan
vals = np.empty(10, dtype=float)
vals.fill(np.nan)
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertTrue(np.isnan(rs).all())
# empty
vals = np.array([])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertEqual(len(rs), 0)
# shorter than window
vals = np.random.randn(5)
rs = mom.rolling_window(vals, 10, 'boxcar')
self.assertTrue(np.isnan(rs).all())
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
# Gh 8238
tm._skip_if_no_scipy()
vals = np.array([[ 12.18, 3.64],
[ 10.18, 9.16],
[ 13.24, 14.61],
[ 4.51, 8.11],
[ 6.15, 11.44],
[ 9.14, 6.21],
[ 11.31, 10.67],
[ 2.94, 6.51],
[ 9.42, 8.39],
[ 12.44, 7.34 ]])
xp = np.array([[ np.nan, np.nan],
[ np.nan, np.nan],
[ 9.252, 9.392],
[ 8.644, 9.906],
[ 8.87 , 10.208],
[ 6.81 , 8.588],
[ 7.792, 8.644],
[ 9.05 , 7.824],
[ np.nan, np.nan],
[ np.nan, np.nan]])
# DataFrame
rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = mom.rolling_mean(vals, 5, min_periods=4, center=True)
rs = mom.rolling_window(vals, 5, 'boxcar', min_periods=4, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
14.03687, 13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
13.33889, 13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
14.17267, 14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
14.03559, 15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
14.20036, 14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan]}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt in win_types:
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_missing_data(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, np.nan, 10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
9.17869, 12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
9.1764, 12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
10.34667, 12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098,
13.65509]
}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
assert_series_equal(xp, rs)
def test_cmov_window_special(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
13.89053, 13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
11.73161, 13.08516, 12.95111, 12.74577,
np.nan, np.nan],
'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
12.88331, 12.96079, 12.77008, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
12.75129, 12.90702, 12.83757, np.nan, np.nan]
}
for wt, k in zip(win_types, kwds):
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_cmov_window_special_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt, k in zip(win_types, kwds):
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(mom.rolling_median, np.median)
def test_rolling_min(self):
self._check_moment_func(mom.rolling_min, np.min)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_min(a, window=100, min_periods=1)
assert_almost_equal(b, np.ones(len(a)))
self.assertRaises(ValueError, mom.rolling_min, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_max(self):
self._check_moment_func(mom.rolling_max, np.max)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_max(a, window=100, min_periods=1)
assert_almost_equal(a, b)
self.assertRaises(ValueError, mom.rolling_max, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_quantile(self):
qs = [.1, .5, .9]
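        # scoreatpercentile below is the reference implementation assumed by this
        # test: sort the values and take the element at floor(q * (n - 1)), i.e. a
        # "lower" nearest-rank percentile with no interpolation.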
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = per / 1. * (values.shape[0] - 1)
return values[int(idx)]
for q in qs:
def f(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_quantile(x, window, q,
min_periods=min_periods,
freq=freq,
center=center)
def alt(x):
return scoreatpercentile(x, q)
self._check_moment_func(f, alt)
def test_rolling_apply(self):
# suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
ser = Series([])
assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
def roll_mean(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_apply(x, window,
lambda x: x[np.isfinite(x)].mean(),
min_periods=min_periods,
freq=freq,
center=center)
self._check_moment_func(roll_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 2.])
assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
assert_almost_equal(result, result)
def test_rolling_std(self):
self._check_moment_func(mom.rolling_std,
lambda x: np.std(x, ddof=1))
self._check_moment_func(functools.partial(mom.rolling_std, ddof=0),
lambda x: np.std(x, ddof=0))
def test_rolling_std_1obs(self):
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1)
expected = np.array([np.nan] * 5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1, ddof=0)
expected = np.zeros(5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
3, min_periods=2)
self.assertTrue(np.isnan(result[2]))
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = np.array([0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767])
b = mom.rolling_std(a, window=3)
self.assertTrue(np.isfinite(b[2:]).all())
b = mom.ewmstd(a, span=3)
self.assertTrue(np.isfinite(b[2:]).all())
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var,
lambda x: np.var(x, ddof=1),
test_stable=True)
self._check_moment_func(functools.partial(mom.rolling_var, ddof=0),
lambda x: np.var(x, ddof=0))
def test_rolling_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_skew,
lambda x: skew(x, bias=False))
def test_rolling_kurt(self):
try:
from scipy.stats import kurtosis
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_kurt,
lambda x: kurtosis(x, bias=False))
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
arr = np.frombuffer(data, dtype='<f8')
if sys.byteorder != "little":
arr = arr.byteswap().newbyteorder()
result = mom.rolling_sum(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_mean(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_var(arr, 2)
self.assertTrue((result[1:] >= 0).all())
# #2527, ugh
arr = np.array([0.00012456, 0.0003, 0])
result = mom.rolling_mean(arr, 1)
self.assertTrue(result[-1] >= 0)
result = mom.rolling_mean(-arr, 1)
self.assertTrue(result[-1] <= 0)
def _check_moment_func(self, func, static_comp, window=50,
has_min_periods=True,
has_center=True,
has_time_rule=True,
preserve_nan=True,
fill_value=None,
test_stable=False):
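        # Shared harness: validate func against the reference static_comp on raw
        # ndarrays (_check_ndarray) and on Series/DataFrame inputs (_check_structures),
        # covering min_periods, centering, NaN handling and the freq/time-rule path.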
self._check_ndarray(func, static_comp, window=window,
has_min_periods=has_min_periods,
preserve_nan=preserve_nan,
has_center=has_center,
fill_value=fill_value,
test_stable=test_stable)
self._check_structures(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
fill_value=fill_value,
has_center=has_center)
def _check_ndarray(self, func, static_comp, window=50,
has_min_periods=True,
preserve_nan=True,
has_center=True,
fill_value=None,
test_stable=False,
test_window=True):
result = func(self.arr, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
if has_min_periods:
result = func(arr, 50, min_periods=30)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# min_periods is working correctly
result = func(arr, 20, min_periods=15)
self.assertTrue(np.isnan(result[23]))
self.assertFalse(np.isnan(result[24]))
self.assertFalse(np.isnan(result[-6]))
self.assertTrue(np.isnan(result[-5]))
arr2 = randn(20)
result = func(arr2, 10, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, 20, min_periods=0)
result1 = func(arr, 20, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr, 50)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# GH 7925
if has_center:
if has_min_periods:
result = func(arr, 20, min_periods=15, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
else:
result = func(arr, 20, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
self.assert_numpy_array_equivalent(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:] + 1e9))
# Test window larger than array, #7297
if test_window:
if has_min_periods:
for minp in (0, len(self.arr)-1, len(self.arr)):
result = func(self.arr, len(self.arr)+1, min_periods=minp)
expected = func(self.arr, len(self.arr), min_periods=minp)
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask,
np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = func(self.arr, len(self.arr)+1)
expected = func(self.arr, len(self.arr))
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
def _check_structures(self, func, static_comp,
has_min_periods=True, has_time_rule=True,
has_center=True,
fill_value=None):
series_result = func(self.series, 50)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, 50)
self.assertEqual(type(frame_result), DataFrame)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
if has_min_periods:
series_result = func(self.series[::2], win, min_periods=minp,
freq='B')
frame_result = func(self.frame[::2], win, min_periods=minp,
freq='B')
else:
series_result = func(self.series[::2], win, freq='B')
frame_result = func(self.frame[::2], win, freq='B')
last_date = series_result.index[-1]
prev_date = last_date - 24 * datetools.bday
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
assert_almost_equal(series_result[-1], static_comp(trunc_series))
assert_almost_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp))
# GH 7925
if has_center:
if has_min_periods:
minp = 10
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, min_periods=minp,
center=True)
frame_rs = func(self.frame, 25, min_periods=minp,
center=True)
else:
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, center=True)
frame_rs = func(self.frame, 25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
assert_series_equal(series_xp, series_rs)
assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(mom.ewma)
arr = np.zeros(1000)
arr[5] = 1
result = mom.ewma(arr, span=100, adjust=False).sum()
self.assertTrue(np.abs(result - 1) < 1e-2)
s = Series([1.0, 2.0, 4.0, 8.0])
expected = Series([1.0, 1.6, 2.736842, 4.923077])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=True),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [1.] * len(s))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [np.nan] * 2 + [1.] * 4)
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
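        # Each w in the cases below is the weight vector that ewma effectively
        # assigns to the observations of s for that (adjust, ignore_na) combination,
        # so the expected output is simply the weighted cumulative mean from simple_wma.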
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
(s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = mom.ewma(s, com=com, adjust=adjust)
assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(mom.ewmvar)
def test_ewmvol(self):
self._check_ew(mom.ewmvol)
def test_ewma_span_com_args(self):
A = mom.ewma(self.arr, com=9.5)
B = mom.ewma(self.arr, span=20)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ewma_halflife_arg(self):
A = mom.ewma(self.arr, com=13.932726172912965)
B = mom.ewma(self.arr, halflife=10.0)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ew_empty_arrays(self):
arr = np.array([], dtype=np.float64)
funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]
for f in funcs:
result = f(arr, 3)
assert_almost_equal(result, arr)
def _check_ew(self, func):
self._check_ew_ndarray(func)
self._check_ew_structures(func)
def _check_ew_ndarray(self, func, preserve_nan=False):
result = func(self.arr, com=10)
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), 50, min_periods=min_periods)
if func == mom.ewma:
assert_series_equal(result, Series([1.]))
else:
# ewmstd, ewmvol, ewmvar with bias=False require at least two values
assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
        self.assertEqual(result2.dtype, np.float64)
def _check_ew_structures(self, func):
series_result = func(self.series, com=10)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel()
return len(set(values[notnull(values)])) == 1
def no_nans(x):
return x.notnull().all().all()
# data is a tuple(object, is_contant, no_nans)
data = create_series() + create_dataframes()
return [ (x, is_constant(x), no_nans(x)) for x in data ]
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
min_periods,
count, mean, mock_mean, corr,
var_unbiased=None, std_unbiased=None, cov_unbiased=None,
var_biased=None, std_biased=None, cov_biased=None,
var_debiasing_factors=None):
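        # Cross-checks a family of moment functions against each other: mean vs. a
        # mock mean, corr(x, x) and mean/var of constant series, unbiased vs. biased
        # variance via the debiasing factors, var(x) == cov(x, x) == std(x)**2,
        # biased var(x) == mean(x*x) - mean(x)**2, and symmetry of corr/cov in (x, y).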
def _non_null_values(x):
values = x.values.ravel()
return set(values[notnull(values)].tolist())
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected)
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = x.max().max()
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
self.assertFalse((var_x < 0).any().any())
self.assertFalse((std_x < 0).any().any())
if cov:
cov_x_x = cov(x, x)
self.assertFalse((cov_x_x < 0).any().any())
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
self.assertFalse((var_x > 0).any().any())
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isnull().equals(y.isnull()):
# can only easily test two Series with similar structure
continue
# check that cor(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) * std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@slow
def test_ewm_consistency(self):
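        # _weights below reconstructs the per-observation weights ewma uses: with
        # adjust=True the i-th valid value gets weight (1/(1-alpha))**i, with
        # adjust=False the weights follow the recursive EWMA update, and
        # ignore_na=True computes them as if the NaN entries had been dropped.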
def _weights(s, com, adjust, ignore_na):
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([ _weights(s.iloc[:, i],
com=com,
adjust=adjust,
ignore_na=ignore_na) for i, _ in enumerate(s.columns) ],
axis=1)
w.index=s.index
w.columns=s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
def _variance_debiasing_factors(s, com, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
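        # The factor above is (sum w)**2 / ((sum w)**2 - sum(w**2)); for equal weights
        # it reduces to n / (n - 1), the usual correction that turns the biased
        # (population) variance into the unbiased (sample) estimate.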
def _ewma(s, com, min_periods, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
return result
com = 3.
for min_periods in [0, 1, 2, 3, 4]:
for adjust in [True, False]:
for ignore_na in [False, True]:
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
@slow
def test_expanding_consistency(self):
base_functions = [
(mom.expanding_count, lambda v: Series(v).count(), None),
(mom.expanding_max, lambda v: Series(v).max(), None),
(mom.expanding_min, lambda v: Series(v).min(), None),
(mom.expanding_sum, lambda v: Series(v).sum(), None),
(mom.expanding_mean, lambda v: Series(v).mean(), None),
(mom.expanding_std, lambda v: Series(v).std(), 1),
(mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.expanding_var, lambda v: Series(v).var(), 1),
#(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.expanding_median, lambda v: Series(v).median(), None),
(mom.expanding_max, np.nanmax, 1),
(mom.expanding_min, np.nanmin, 1),
(mom.expanding_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.expanding_mean, np.nanmean, 1),
(mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.expanding_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.expanding_max, np.max, None),
(mom.expanding_min, np.min, None),
(mom.expanding_sum, np.sum, None),
(mom.expanding_mean, np.mean, None),
(mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
(mom.expanding_median, np.median, None),
]
# suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
for min_periods in [0, 1, 2, 3, 4]:
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
# test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
# or (b) expanding_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (expanding_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if expanding_f is mom.expanding_count:
expanding_f_result = expanding_f(x)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
else:
if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
else:
expanding_f_result = expanding_f(x, min_periods=min_periods)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
assert_equal(expanding_f_result, expanding_apply_f_result)
if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
# test pairwise=True
expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
base_functions = [
(mom.rolling_count, lambda v: Series(v).count(), None),
(mom.rolling_max, lambda v: Series(v).max(), None),
(mom.rolling_min, lambda v: Series(v).min(), None),
(mom.rolling_sum, lambda v: Series(v).sum(), None),
(mom.rolling_mean, lambda v: Series(v).mean(), None),
(mom.rolling_std, lambda v: Series(v).std(), 1),
(mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.rolling_var, lambda v: Series(v).var(), 1),
#(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.rolling_median, lambda v: Series(v).median(), None),
(mom.rolling_max, np.nanmax, 1),
(mom.rolling_min, np.nanmin, 1),
(mom.rolling_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.rolling_mean, np.nanmean, 1),
(mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.rolling_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.rolling_max, np.max, None),
(mom.rolling_min, np.min, None),
(mom.rolling_sum, np.sum, None),
(mom.rolling_mean, np.mean, None),
(mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
(mom.rolling_median, np.median, None),
]
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
for center in [False, True]:
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: mom.rolling_count(x, window=window, center=center),
mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
mom.rolling_count(x, window=window, center=center)),
corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
# test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
# or (b) rolling_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (rolling_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if rolling_f is mom.rolling_count:
rolling_f_result = rolling_f(x, window=window, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=0, center=center)
else:
if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
else:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=min_periods, center=center)
assert_equal(rolling_f_result, rolling_apply_f_result)
if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
# test pairwise=True
rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
center=center, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
window=window, min_periods=min_periods, center=center)
assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_cov(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment(mom.rolling_cov, 10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_corr(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = mom.rolling_corr(a, b, len(a), min_periods=1)
assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment(mom.rolling_corr, 10, min_periods=5)
def _check_pairwise_moment(self, func, *args, **kwargs):
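        # The pairwise variants return a Panel indexed (time, column, column), so
        # slicing .ix[:, 1, 5] must match the plain binary moment of columns 1 and 5.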
panel = func(self.frame, *args, **kwargs)
actual = panel.ix[:, 1, 5]
expected = func(self.frame[1], self.frame[5], *args, **kwargs)
tm.assert_series_equal(actual, expected)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
        self.assertRaises(TypeError, mom._flex_binary_moment, 5, 6, None)
def test_corr_sanity(self):
#GH 3155
df = DataFrame(
np.array(
[[ 0.87024726, 0.18505595],
[ 0.64355431, 0.3091617 ],
[ 0.92372966, 0.50552513],
[ 0.00203756, 0.04520709],
[ 0.84780328, 0.33394331],
[ 0.78369152, 0.63919667]])
)
res = mom.rolling_corr(df[0],df[1],5,center=True)
        self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
# and some fuzzing
for i in range(10):
df = DataFrame(np.random.rand(30,2))
res = mom.rolling_corr(df[0],df[1],5,center=True)
            try:
                self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
            except AssertionError:
                # show the offending correlations before re-raising so the failure is debuggable
                print(res)
                raise
def test_flex_binary_frame(self):
def _check(method):
series = self.frame[1]
res = method(series, self.frame, 10)
res2 = method(self.frame, series, 10)
exp = self.frame.apply(lambda x: method(series, x, 10))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = method(self.frame, frame2, 10)
exp = DataFrame(dict((k, method(self.frame[k], frame2[k], 10))
for k in self.frame))
tm.assert_frame_equal(res3, exp)
methods = [mom.rolling_corr, mom.rolling_cov]
for meth in methods:
_check(meth)
def test_ewmcov(self):
self._check_binary_ew(mom.ewmcov)
def test_ewmcov_pairwise(self):
self._check_pairwise_moment(mom.ewmcov, span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew(mom.ewmcorr)
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment(mom.ewmcorr, span=10, min_periods=5)
def _check_binary_ew(self, func):
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
self.assertTrue(np.isnan(result.values[:14]).all())
self.assertFalse(np.isnan(result.values[14:]).any())
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply(self):
ser = Series([])
assert_series_equal(ser, mom.expanding_apply(ser, lambda x: x.mean()))
def expanding_mean(x, min_periods=1, freq=None):
return mom.expanding_apply(x,
lambda x: x.mean(),
min_periods=min_periods,
freq=freq)
self._check_expanding(expanding_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 3.])
assert_series_equal(result, expected)
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = mom.expanding_apply(df, np.mean) + 20.
assert_frame_equal(mom.expanding_apply(df, mean_w_arg, args=(20,)),
expected)
assert_frame_equal(mom.expanding_apply(df, mean_w_arg,
kwargs={'const' : 20}),
expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = mom.expanding_corr(A, B)
rolling_result = mom.rolling_corr(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = mom.expanding_count(self.series)
assert_almost_equal(result, mom.rolling_count(self.series,
len(self.series)))
def test_expanding_quantile(self):
result = mom.expanding_quantile(self.series, 0.5)
rolling_result = mom.rolling_quantile(self.series,
len(self.series),
0.5, min_periods=1)
assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = mom.expanding_cov(A, B)
rolling_result = mom.rolling_cov(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_max(self):
self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
result = mom.expanding_cov(self.frame)
rolling_result = mom.rolling_cov(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_corr_pairwise(self):
result = mom.expanding_corr(self.frame)
rolling_result = mom.rolling_corr(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_cov(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, None, 4.5])
assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_corr(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, None, 1.])
assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_cov(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_cov(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_corr(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_corr(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_functions_window_non_shrinkage(self):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1,5], [3, 2], [3,9], [-1,0]], columns=['A','B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
df_expected_panel = Panel(items=df.index, major_axis=df.columns, minor_axis=df.columns)
functions = [lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df_result = f(df)
assert_frame_equal(df_result, df_expected)
except (ImportError):
# scipy needed for rolling_window
continue
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                     # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
assert_panel_equal(df_result_panel, df_expected_panel)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2_expected = df2
df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
functions = [lambda x: mom.expanding_count(x),
lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_max(x, min_periods=5),
lambda x: mom.expanding_min(x, min_periods=5),
lambda x: mom.expanding_sum(x, min_periods=5),
lambda x: mom.expanding_mean(x, min_periods=5),
lambda x: mom.expanding_std(x, min_periods=5),
lambda x: mom.expanding_var(x, min_periods=5),
lambda x: mom.expanding_skew(x, min_periods=5),
lambda x: mom.expanding_kurt(x, min_periods=5),
lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
lambda x: mom.expanding_median(x, min_periods=5),
lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
lambda x: mom.rolling_count(x, window=10),
lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df1_result = f(df1)
assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                     # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
assert_panel_equal(df1_result_panel, df1_expected_panel)
df2_result_panel = f(df2)
assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B'])
df1a = DataFrame([[1,5], [3,9]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_cov(df1, df2, pairwise=True)[2]
result2 = mom.expanding_cov(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_cov(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_cov(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,2], [3, 2], [3,4]], columns=['A','B'])
df1a = DataFrame([[1,2], [3,4]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_corr(df1, df2, pairwise=True)[2]
result2 = mom.expanding_corr(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_corr(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_corr(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_pairwise_stats_column_names_order(self):
# GH 7738
df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]),
DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]),
DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]),
DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']),
]
df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
s = Series([1,1,3,8])
# suppress warnings about incomparable objects, as we are deliberately testing with such column labels
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
# DataFrame methods (which do not call _flex_binary_moment())
for f in [lambda x: x.cov(),
lambda x: x.corr(),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.columns)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=True
for f in [lambda x: mom.expanding_cov(x, pairwise=True),
lambda x: mom.expanding_corr(x, pairwise=True),
lambda x: mom.rolling_cov(x, window=3, pairwise=True),
lambda x: mom.rolling_corr(x, window=3, pairwise=True),
lambda x: mom.ewmcov(x, com=3, pairwise=True),
lambda x: mom.ewmcorr(x, com=3, pairwise=True),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=False
for f in [lambda x: mom.expanding_cov(x, pairwise=False),
lambda x: mom.expanding_corr(x, pairwise=False),
lambda x: mom.rolling_cov(x, window=3, pairwise=False),
lambda x: mom.rolling_corr(x, window=3, pairwise=False),
lambda x: mom.ewmcov(x, com=3, pairwise=False),
lambda x: mom.ewmcorr(x, com=3, pairwise=False),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=True
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
lambda x, y: mom.expanding_corr(x, y, pairwise=True),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
]:
results = [f(df, df2) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df2.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=False
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
lambda x, y: mom.expanding_corr(x, y, pairwise=False),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
]:
results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
for (df, result) in zip(df1s, results):
if result is not None:
expected_index = df.index.union(df2.index)
expected_columns = df.columns.union(df2.columns)
assert_index_equal(result.index, expected_index)
assert_index_equal(result.columns, expected_columns)
else:
tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
# DataFrame with a Series
for f in [lambda x, y: mom.expanding_cov(x, y),
lambda x, y: mom.expanding_corr(x, y),
lambda x, y: mom.rolling_cov(x, y, window=3),
lambda x, y: mom.rolling_corr(x, y, window=3),
lambda x, y: mom.ewmcov(x, y, com=3),
lambda x, y: mom.ewmcorr(x, y, com=3),
]:
results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_skew(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_skew(d, window=2)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
0.177994, 1.548824])
x = mom.rolling_skew(d, window=4)
assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_kurt(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_kurt(d, window=3)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
1.224307, 2.671499])
x = mom.rolling_kurt(d, window=4)
assert_series_equal(expected, x)
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
assert_almost_equal(result[10],
static_comp(self.arr[:11]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
arr = randn(50)
if has_min_periods:
result = func(arr, min_periods=30)
assert(np.isnan(result[:29]).all())
assert_almost_equal(result[-1], static_comp(arr[:50]))
# min_periods is working correctly
result = func(arr, min_periods=15)
self.assertTrue(np.isnan(result[13]))
self.assertFalse(np.isnan(result[14]))
arr2 = randn(20)
result = func(arr2, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, min_periods=0)
result1 = func(arr, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr)
assert_almost_equal(result[-1], static_comp(arr[:50]))
def _check_expanding_structures(self, func):
series_result = func(self.series)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame)
self.assertEqual(type(frame_result), DataFrame)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True,
preserve_nan=True):
self._check_expanding_ndarray(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
preserve_nan=preserve_nan)
self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_max_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='median')
assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0+10.0+20.0)/3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='mean')
assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_min(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_median_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_median(series, window=1, freq='D')
assert_series_equal(expected, x)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
hcchengithub/peforth | log.txt.py | 1 | 211869 |
peforth
[x] 13:59 2017-07-31 Found the JavaScript eval() equivalent in Python
https://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python
It works!!
>>> mycode = 'print ("hello world")'
>>> exec(mycode)
hello world
>>>
The technique of returning a function from another function is known as currying:
https://stackoverflow.com/questions/14261474/how-do-i-write-a-function-that-returns-another-function
Python annoymous function lambda
http://blog.csdn.net/majianfei1023/article/details/45269343
https://www.zhihu.com/question/20125256
[x] review project-k , should project-k support python too?
which will be peforth.py 10:04 2019/11/26 it's projectk.py now.
[x] Ask the original pyforth author directly about the license terms ---> turned out to be unnecessary.
[x] Experiment: generate a function with exec()
s = '''
def show(s):
print(s)
'''
exec(s)
>>> show('abc')
abc
>>> 成功了!
[x] Try to define a Python object
s = '''
class a():
vm = None
def b(self): # self is must
print(b) # b unknown
print(self)
print(a)
vm = self
c = a()
'''
exec(s)
[x] File-reading examples that peforth can borrow from
# average5 .py
def main() :
fileName = input ("What file are the numbers in? " )
infile = open(fileName, 'r')
sum = 0
count = 0
for line in infile:
sum = sum + eval (line)
count = count + 1
print ("\nThe average Of the numbers is", sum / count)
main ( )
# average6.py
def main() :
fileName = input ("What file are the numbers in? " )
infile = open(fileName, 'r')
sum = 0.0
count = 0
line = infile.readline()
while line != "":
sum = sum + eval(line)
count = count + 1
line = infile.readline()
print("\nThe average Of the numbers is", sum / count)
main()
[x] Figured out how Python modules work; it is simple. peforth.py itself is the peforth VM.
Unlike JavaScript, there is no need to wrap the entire VM inside a function, see
GitHub\peforth\projectile.py
Python can redefine functions and methods; functions and methods are
variables too.
Python objects, like JavaScript objects, can gain new properties and methods
simply by assigning values to them.
>>> type(show) # show is an object
<class 'projectile.Projectile'>
>>> show
<projectile.Projectile object at 0x000001C6260D0438>
>>> show.x = 0 # assign new property to show
>>> show.y = 11
>>> show.p = 22
>>> dir(show) # check it out
['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__',
'__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
'__str__', '__subclasshook__', '__weakref__', 'getHere', 'getX', 'getY', 'p',
'update', 'x', 'xpos', 'xvel', 'y', 'ypos', 'yvel']
>>>
[x] Python can also "see" a function's source code
https://stackoverflow.com/questions/427453/how-can-i-get-the-source-code-of-a-python-function
def foo(a):
x = 2 # how about a comment?
return x + a
import inspect
# inspect.getsource(foo)
# u'def foo(a):\n x = 2\n return x + a\n'
print (inspect.getsource(foo))
==> Completely successful; even the comment gets displayed.
==> But it fails for functions assembled by py> and py:
py> tick('test').cfa ==> 1
py> dictionary[1:] ==> [.s, <function <lambda> at 0x0000024CE15810D0>,
.s, <function <lambda> at 0x0000024CE1581158>, .s, None, None]
OK py> inspect.getsource(dictionary[2]) .
could not get source code <------------------- error message
Debug? [y/N]
The dis module introduced in the same stackoverflow answer really works too!
>>> import dis
>>> def func(x):
... print(x+1)
...
>>> func(123)
124
>>> dis.dis(func)
2 0 LOAD_GLOBAL 0 (print)
2 LOAD_FAST 0 (x)
4 LOAD_CONST 1 (1)
6 BINARY_ADD
8 CALL_FUNCTION 1
10 POP_TOP
12 LOAD_CONST 0 (None)
14 RETURN_VALUE
>>> Wow! It shows the function's bytecode, excellent!!
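The eventual workaround for the "could not get source code" failure above (used later by genfunc) is to carry the source text on the generated function ourselves; illustration only, not the project's exact code:
    src = "def f():\n    return 1 + 1   # a comment survives too\n"
    local = {}
    exec(src, globals(), local)
    f = local['f']
    f.__doc__ = src        # inspect.getsource() cannot help here, so keep the text ourselves
    print(f.__doc__)       # shows the original source of the generated function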
[x] Python equivalent of:
Word.prototype.toString = function(){return this.name + " " + this.help}; // every word introduces itself
--> Got it: define the __str__ method of the class
#------- ex2.py ---------------
class d():
def __str__(self):
return "a __str__"
def __repr__(self):
return "a __repr__"
class x():
name = 'stella'
feet = 'splender'
#------------------------------
>>> import ex2
>>> x = ex2.x()
>>> x
<ex2.x object at 0x00000170D77202B0> <---- default __repr__ 打印
>>> print(x)
<ex2.x object at 0x00000170D77202B0> <---- default __str__ 傳回值
>>> d = ex2.d()
>>> d # <--------- 執行該 obj 時, 打印 __repr__() 的傳回值
a __repr__ # 應該讓它執行該 word
>>> print(d) # <---- obj 本身的傳回值是 __str__() 的傳回值
a __str__
>>>
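Applied to peforth, a guessed sketch of what the Word class ends up doing (the real class lives in projectk.py; attribute names beyond name/xt/help are omitted here):
    class Word:
        def __init__(self, name, xt):
            self.name, self.xt, self.help = name, xt, ""
        def __str__(self):                       # every word introduces itself
            return self.name + " " + self.help
        def __repr__(self):                      # shown when the object itself is displayed
            return "<Word '{}'>".format(self.name)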
[x] Probe further into the characteristics of the future peforth.py kernel module
Ynote: 搞懂 python 的 module files globals() locals().note
[x] docode() needs to assemble functions; refer to how anonymous functions are defined:
https://stackoverflow.com/questions/6629876/how-to-make-an-anonymous-function-in-python-without-christening-it
Study built-in function exec() https://docs.python.org/3/library/functions.html#exec
Study built-in function compile() https://docs.python.org/3/library/functions.html#compile
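A minimal sketch of the idea; make_xt is a hypothetical helper for illustration, not the actual docode()/genxt():
    def make_xt(name, body):
        # body is one line of python source, e.g. "print('hi')"
        src = "def {}():\n    {}\n".format(name, body)
        code = compile(src, "<peforth>", "exec")    # source text -> code object
        local = {}
        exec(code, globals(), local)                # run the def, harvest the function
        return local[name]
    hi = make_xt("hi", "print('hello')")
    hi()                                            # prints: hello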
[x] genxt() 成功了
[x] IDLE path working directory working folder
import sys
sys.path.append('c:/Users/hcche/Documents/GitHub/peforth')
[x] 12:50 2017/08/12 已經跑起來了, debugging compiling == 'code' 的問題
--> 可能是 end-code 裡面 Word(newname,newxt) 失敗的關係 --> no, it can't fail
--> 應該是 docode 裡面, 結構不太好, 萬一 reDef 或 genxt() 失敗了會怎樣?
很多都會半途結束, 留下 compiling == 'code' 的問題。 --> all tested, behavior acceptable now
[x] "import re" in peforth.py kernel is not a good choice.
Simply letting the main program to do that. The main program is eforth.3py
--> Yeah! it works.
c:\Users\hcche\Documents\GitHub\peforth>python eforth.3py
hello eforth!!
--> 錯了, 每個 .py 檔都自己 import re, import pdb 反而是對的, see:
https://stackoverflow.com/questions/8957859/python-child-cannot-use-a-module-the-parent-imported
... Generally if you're doing simple obvious things like importing a standard module,
you should do it the simple and obvious way......
[x] reproduce the problem:
import peforth as vm
vm.dictate('code test end-code') # Try this first
vm.words['forth']
這樣是成功的,但是進入 forth command line 之後, 同樣的工作... 還是成功的。
--> 改試 vm.dictate('code test3 print("hello test3!!") end-code')
>>> vm.execute('test3') --> hello test3!! 很成功
--> 進 forth command line
>>> vm.peforth()
OK code test4 print("hello test4") end-code
OK test4
hello test4
OK
還是很成功
--> 好像要出過 error e.g. word unknown 之類才能複製問題
>>> code test5 end-code
File "<stdin>", line 1
code test5 end-code
^
SyntaxError: invalid syntax
>>>
的確是這樣!!! now I've got the SRP
--> 似乎是 w.xt(w) 執行 end-code 時出問題, 檢查此時的 end-code
RI, outer() 裡面分辨 token 是否 [int, float] 用 eval(token) 會有 exception
必須要用 try - except 處理才行。 --> Fixed !!!
[x] why after OK type 'words' no response <--- should be : Error! words unknown.
--> 結果發現, 所有的 dir(vm) attributes 都這樣!!
(Pdb) eval('pop') ==> <function pop at 0x00000178A534A730>
(Pdb) eval('dictionary') ==> [0]
(Pdb) eval('stack') ==> [{'forth': [0, code, end-code, //, stop, *debug*]}, {'forth': [0, code, end-code, //, stop, *debug*]}, {'forth': [0, code, end-code, //, stop, *debug*]}, <class 'peforth.Word'>, <function phaseA at 0x00000178A534A0D0>, <function phaseB at 0x00000178A534A158>]
所以, outer() 還要再改良。
--> eval() 的結果 + 0 就可以保證他是 number 了
[x] kernel project-k.py instead of peforth.py
[X] code word's help, not easy, keep the record.
# stack diagram
ntibwas, s = ntib, nextstring("\\(")
if s['flag']: # stack diagram is existing
pdb.set_trace()
newhelp = '( ' + nexttoken('\\)') + nexttoken() + ' '
else: # return s to tib
ntib = ntibwas
# word description
ntibwas, s = ntib, nextstring("\\")
if s['flag']: # description is existing
newhelp += nexttoken('\n|\r')
else: # return s to tib
ntib = ntibwas
code \ last().help += nexttoken('\n|\r'); end-code immediate
// ( <comment> -- ) Give help message to the new word.
code ( last().help = '( ' + nexttoken('\\)') + nexttoken() + ' ' end-code immediate
// ( -- ) Get stack diagram to the last's help.
--> v1.23 code words 可以用 # 下 help 了。
[x] In jeforth, window.colonxt is dynamically created by the definition of ':'.
Can peforth.f do that too in python? Yes!!!
>>> def test():
... globals()['cc'] = 123
...
>>> cc
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'cc' is not defined
>>> test()
>>> cc
123
>>>
[/] : test ;
'module' object does not support item assignment
Debug? [y/N] y
RI: last().xt = xt # also vm['colonxt'] <------ [/] easy, deal with this later
[x] After the above error probably, after colon definition the compiling is still True!!!
--> because forgot declare it a global.
B i n g o ! ! colon definition works now
[x] literal needs to use closure
def gen(n): # function generator
def f(): # literal run time function
print(n)
f.description = "{} {}".format(type(n),n)
return f
f = gen([11,22,33])
f()
>>> f.description
"<class 'list'> [11, 22, 33]"
# functions are not shown by __str__ and __repr__ like dict
# def str(self): # return help message
# return f.description
# def repr(self): # execute xt and return help message
# return f.description
# str.desc = "I am str"
# repr.desc = "I am repr"
# f.__str__ = str
# f.__repr__ = repr
[x] py> and py: should both be rewritten to use compile(code,"")
compile (a Chinese introduction) http://www.th7.cn/Program/Python/201608/923063.shtml
A lambda cannot perform an assignment (its body is limited to a single expression, for safety reasons),
so py: cannot be built on lambda; it has to go through compile(). See the sketch below.
https://stackoverflow.com/questions/20695745/why-use-lambdas-vs-1-line-function-declarations
--> [x] Already found that py: tick('//').immediate=True no longer works!!!
--> Rewrote py: and py> with <py> </py> </pyV> respectively, ok now
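For the record, a tiny illustration of the limitation: a lambda body must be a single expression, so a statement such as an assignment has to go through compile()/exec():
    # f = lambda: x = 123         # SyntaxError: a lambda cannot contain a statement
    ns = {}
    stmt = compile("x = 123", "<py:>", "exec")
    exec(stmt, ns)                # the compiled statement runs fine
    print(ns["x"])                # 123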
[x] pyExec pyEval 是多餘的 --> 去除
[x] (Pdb) execute("sdfsdf")
(Pdb)
沒半點錯誤訊息, 有問題看不出來!!
--> fixed, now it's a panic.
[x] Why doesn't the undefined word 'compiling' trigger "unknown"?
--> outer() used eval(token) to decide whether a token is a number; that does not work:
when token='compiling' it raises no exception and instead returns that variable's value, True or False !!
--> Switched to complex(token), which works perfectly! (sketch below)
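Roughly what the fixed number check amounts to (is_number is just an illustrative name; the real test sits inline inside outer() in projectk.py):
    def is_number(token):
        try:
            complex(token)        # accepts '123', '3.14', '1e3', '3+4j' ... but not 'compiling'
            return True
        except ValueError:
            return False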
[x] t> >t t@
>>> line = 'Cats are smarter than dogs\n\\ 1234\n\\ 2233'
>>> matchObj = re.search( r'\n\\ (\d*)$', line)
>>> matchObj.group()
'\n\\ 2233'
>>> matchObj.group(1)
'2233'
>>> len(matchObj.group())
7
>>> line[:-7]
'Cats are smarter than dogs\n\\ 1234'
>>>
[x] [/py] [/pyV] 只分別取得 exec-code 與 eval-code 不執行, 可以用 execute 執行嗎?
[x] execute 也要能執行 exec-code 或 eval-code ---> done
[x] 這兩個都不要,應該是個 compyle ( 'source' -- code object ) \ python compiler 指令
[x] 讓 execute() 認得 code object
--> OK ' compyle .
compyle ( "source" -- exec-code ) Python compile source to exec-code object __str__ OK
OK char print('hi') compyle
OK execute
hi
OK 一次就成功了!!
[x] colon definition 裡看能不能用 comma 塞入一個 code object ?
--> : test char print('hi') compyle execute ; 成功
: test2 [ char print('hi') compyle , ] ; 也成功
: cfa ' py> dictionary[pop().cfa:] . cr ;
OK cfa test2
[ /* test2 */ <code object <module> at 0x0000019B24E1F8A0, file "", line 1>, None,
/* cfa */ ', <function xt.<locals>.<lambda> at 0x0000019B24E29C80>, ., cr, None,
None]
OK
[x] 有了 compyle 要不要改寫 <py> </py> </pyV> 等?
--> 只簡化了 </py> 一點點
[x] debug :: --> root cause 又是 branch 裡 assignment to ip 忘了加 vm.ip
OK 11 22 ' + :: xt() .s ==> [33] OK 表示 :: interpret mode 功能 ok
OK : test :: xt() ;
--Return--
> <string>(2)xt()->None
(Pdb) c
OK see-cfa test
[<code object <module> at 0x000001F1364F68A0, file "", line 1>, None, None]
OK 22 33 ' + test
OK .s
[55]
OK
[x] constant 要用到 vm.forth['varname'] 複習一下 python 語法
constant 要做的事 --> 'push(vm["forth"]["x"])'
一開始 word-list 都沒有自己的空間
(Pdb) vm['forth']
*** TypeError: 'module' object is not subscriptable
(Pdb) vm.forth
*** AttributeError: module 'projectk' has no attribute 'forth'
不能這樣 init :
(Pdb) vm['forth']={}
*** TypeError: 'module' object does not support item assignment
要這樣 init :
(Pdb) setattr(vm,'forth',{})
Object 的 attribute 不能這樣 access :
(Pdb) vm['forth'] <--- 這是 dict 的方式
*** TypeError: 'module' object is not subscriptable
要這樣 access :
(Pdb) vm.forth
{}
(Pdb) getattr(vm,'forth')
{}
(Pdb)
[x] colon definition 失敗還是會佔一個位置
OK 123 constant x
OK 345 to x
Error! Assigning to a none-value.
Debug? [y/N]
OK : test 44445555 to x ;
Error! Assigning to a none-value. <--- 馬上觸發錯誤,好。
Debug? [y/N]
OK words
0 code end-code // ...snip... to x test <--- test 佔了位置
OK : ttt ;
OK words
0 code end-code // ...snip... to x test ttt <--- 確實佔了位置
OK test
Error! test unknown. <---- colon definition 失敗, 只是沒有 reveal 而已
Debug? [y/N]
OK rescan-word-hash <---- rescan 之後它就會出現!!
OK test
OK .s
[44445555]
OK
--> jeforth 也一樣, 算了, 有警告就可以了。
--> (forget) 一下可以把它消除掉
[x] tib keeps getting corrupted in normal use
OK char $ . rewind
OK 11 22 33 *debug* # <---- the simplest repro
(Pdb) tib
'112233*debug*' # <----- already corrupted at this point !!!
(Pdb)
The problem is inside the kernel's nexttoken()
--> Root cause 1 : nexttoken() <--- rewrote the "skip leading white spaces" part (sketch below)
Root cause 2 : tib and ntib looked strange <-- ntib being too large was ruled out first
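The gist of the fix as a standalone sketch; the kernel's nexttoken() works on the global tib/ntib rather than arguments, so this is an illustration only:
    import re
    def nexttoken_sketch(tib, ntib, deli=r"\s"):
        ntib += len(re.match(r"\s*", tib[ntib:]).group(0))   # skip leading white spaces first
        m = re.search(deli, tib[ntib:])
        if m:
            return tib[ntib:ntib + m.start()], ntib + m.end()
        return tib[ntib:], len(tib)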
[x] writeTextFile experiment
OK <py> open("pathname.txt", "wt")</pyV> constant f
reDef f
OK f .
<_io.TextIOWrapper name='pathname.txt' mode='wt' encoding='cp950'> OK f :> name
--> pathname.txt OK
OK f :: write("abc")
OK f :: write("123")
OK f :: write("中文")
OK f :: close()
encoding='utf-8'
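For reference, a minimal version of the two helpers this experiment is about; my own sketch, the shipped readTextFile/writeTextFile may differ in details:
    def readTextFile(pathname):
        with open(pathname, 'r', encoding='utf-8') as f:
            return f.read()
    def writeTextFile(pathname, string):
        with open(pathname, 'wt', encoding='utf-8') as f:
            f.write(string)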
[x] refill works now. Use refill to improve <text> first. Let it accept
multiple lines. ---> 最後是簡單地引進 accept2 用 Ctrl-D 切換 multiple-line mode 即可. 保留以下研究過程。
: <text>.interpret ( <multi-lines> -- "string" ) // get multi-lines string from ternimal
CR word ( s )
begin
accept if ( s line )
\ get string to s, leave </text> and the rests in tib by adjusting ntib
py> re.search("(.*)</text>(.*)",tos()) ( s line re )
py> bool(tos()) if \ line has </text> ?
( s line re )
py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
\ s += re.group(1)
nip ( s re ) :> group(1) + ( s )
exit
else ( s line re )
\ s += line
drop + ( s )
else ( s )
\ s += '\n'
py> pop()+'\n'
then
refill
again ;
我發現, bool(regEx) 可以看出 re.search 的結果是否 found
[x] See MetaMoji 討論如何適當分割以上複雜的 <text>.interpret 成簡單的 一行成功; 多行輸入 兩段。
其中多行輸入是個公用 routine
[x] 實驗後綴法是否有簡化功效? 使 group(1) 成為共同的結果
\ regular expression 實例
OK <py> re.search("(.*?)</text>(.*)","aa </text>bb</text>")</pyV> ( re ) constant re
OK re bool . cr ^^^^^^ 故意加上後綴讓 re.search 總是成功
True <--- 總是成功
OK re :> group() . cr
aa </text>bb</text>
OK re :> group(1) . cr
aa <----------------------------- group(1) 為所求
OK re :> group(2) . cr
bb</text> <-------------------- group(2) 去掉後綴之後還給 tib
OK <py> re.search("(.*?)</text>(.*)","aa bb</text>")</pyV> ( re ) constant re
OK re bool . cr
True
OK re :> group() . cr
aa bb</text>
OK re :> group(1) . cr
aa bb <------------ 當 bool group(2) False 時 group(1) 仍為所求, 故確有簡化功效
OK re :> group(2) . cr
OK re :> group(2)=="" . cr
True
OK re :> group(2) bool .
False OK
[x] 多行輸入公用 routine
[x] 19:46 2020/10/04 複習需要 ^D 多行輸入 multiple lines inpue 的原因:如果是 colon definition 本來
就可以在 compiling state 多行輸入,問題出在 code ... end-code 期間需要 ^D multiple lines input.
: accepts ( "deli" <multiple lines> -- "string" ) // Get multiple lines from tib up to delimiter
( deli )
begin
accept if ( s line )
\ get string to s, leave </text> and the rests in tib by adjusting ntib
py> re.search("(.*)</text>(.*)",tos()) ( s line re )
py> bool(tos()) if \ line has </text> ?
( s line re )
py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
\ s += re.group(1)
nip ( s re ) :> group(1) + ( s )
exit
else ( s line re )
\ s += line
drop + ( s )
else ( s )
\ s += '\n'
py> pop()+'\n'
then
refill
again ;
code accept2 # use Ctrl-D at the end to terminate the input. py> chr(4)=='^D' --> True
result, s = "", input()
while not chr(4) in s:
result += s
s = input()
result += s.replace(chr(4),'\n') # all ^D become \n
push(result)
push(True)
end-code // ( -- str T|F ) Read a line from terminal.
[x] accept can be single line accept1 or multiple lines accept2 , switch by Ctrl-D
8: [EOT] (<class 'str'>) <---- the Ctrl-D from input()
OK py> ord(tos()[0]) . cr
4
OK
示範 <accept> ... </accept> 的用法
------- clipboard ---------
dropall
<accept>
11
22
33
44
55
</accept>
66
77
88
99
----------------------------
OK dropall # paste 之後的樣子
OK <accept>
11
22
33
44
55
</accept>66 # 這是最後一行,注意!66 可以往前緊貼, delimiter 會整個被忽略掉。
OK 77
OK 88
OK 99
----------------------------
OK .s # 看看結果 .......
0: 11
22
33
44
55
66
(<class 'str'>)
1: True (<class 'bool'>)
2: 77 4Dh (<class 'int'>)
3: 88 58h (<class 'int'>)
4: 99 63h (<class 'int'>)
OK
[x] .s in trouble when cell is False, None ... etc
[x] peforth.py 可以直接執行 : python peforth.py
也可以由 python interpreter 執行: >>> peforth.main() 此時 exit 回到 python interpreter
bye 則會傳回 errorlevel 回到 DOS.
# 從 python interpreter 就可以看到 peforth.py module 裡的 globals
>>> dir(peforth)
['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__',
'__package__', '__spec__', 'greeting', 'main', 'panic', 'readTextFile',
'vm', 'writeTextFile']
# 從 python interpreter 更可以看到 project-k vm 裡的 globals
>>> dir(peforth.vm)
['EXIT', 'RET', 'Word', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__',
'__name__', '__package__', '__spec__', 'code', 'colonxt', 'comma', 'compiling', 'context',
'context_word_list', 'current', 'current_word_list', 'debug', 'dictate', 'dictionary',
'dis', 'docode', 'doendcode', 'endcode', 'execute', 'forth', 'genxt', 'greeting',
'here', 'inner', 'inspect', 'ip', 'isReDef', 'json', 'last', 'major_version', 'multiple',
'name', 'newhelp', 'newname', 'newxt', 'nextstring', 'nexttoken', 'ntib', 'order', 'os',
'outer', 'panic', 'pdb', 'phaseA', 'phaseB', 'pop', 'push', 're', 'readTextFile', 'reset',
'rstack', 'rtos', 'stack', 'stop', 'tib', 'tick', 'tos', 'version', 'vm', 'vocs', 'wordhash',
'words', 'writeTextFile']
# 從 python interpreter 也可以執行 peforth
>>> peforth.vm.dictate
<function dictate at 0x000001D1368E2510>
>>> peforth.vm.dictate('version')
p e f o r t h v1.01
source code http://github.com/hcchengithub/peforth
# 在 peforth 裡面定義的東西, 回到 python interpreter 取用:
>>> peforth.main()
OK 123 constant x
OK exit
>>> peforth.vm.forth
{'obj2dict': <function object2dict at 0x000001D136934510>, 'x': 123}
>>> peforth.vm.forth['x'] --> 123
# 用 obj2dict() 把 Word 轉成 dict, 這是 see 的準備
>>> peforth.vm.forth['obj2dict'](peforth.vm.tick('+'))
{'__class__': 'Word', '__module__': 'projectk', 'name': '+', 'xt': <function xt at 0x000001D1368F28C8>, 'immediate': False, 'help': '( a b -- a+b) Add two numbers or concatenate two strings.', 'comment': '', 'vid': 'forth', 'wid': 51, 'type': 'code'}
[x] see code words
# json 需要先給它 obj2dict() function 才能處理我們的 object
OK py> json.dumps(tick('+'),indent=4) .
Failed to run <Word '</pyV>'>: Object of type 'Word' is not JSON serializable
Continue, Debug, or Abort? [C/d/a] a ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# 從 peforth 裡面定義轉換 function
<py>
def object2dict(obj):
#convert object to a dict
d = {}
d['__class__'] = obj.__class__.__name__
d['__module__'] = obj.__module__
d.update(obj.__dict__)
return d
push(object2dict)
</py>
^D
OK .s
0: <function object2dict at 0x000001D136934510> (<class 'function'>)
OK constant obj2dict
OK exit
# 有了轉換 function 就可以讓 json 完成工作
>>> import json
>>> print(json.dumps(peforth.vm.tick('+'),default=peforth.vm.forth['obj2dict'],indent=4))
{
"__class__": "Word",
"__module__": "projectk",
"name": "+",
"xt": {
"__class__": "function",
"__module__": "projectk",
"source": "def xt(_me=None): ### + ###\n push(pop(1)+pop()) \n",
"name": "+"
},
"immediate": false,
"help": "( a b -- a+b) Add two numbers or concatenate two strings.",
"comment": "",
"vid": "forth",
"wid": 51,
"type": "code"
}
>>>
[x] code object 希望能帶 source code 以供 see
OK 45 @ dir .
['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__',
'__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__',
'__le__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
'__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'co_argcount', 'co_cellvars',
'co_code', 'co_consts', 'co_filename', 'co_firstlineno', 'co_flags', 'co_freevars',
'co_kwonlyargcount', 'co_lnotab', 'co_name', 'co_names', 'co_nlocals', 'co_stacksize',
'co_varnames'] OK
OK --> 不行, code object 裡面不能新增 attribute 也不能改裡面的
若不行, 只好模仿 Word 弄成一個 class 來裝 code object 就可以帶上 source code
或用 closure , 也就是 genxt() 的方法也是現成已經成功的辦法。也不見得比 compyle 差。
或用 dis.dis(func) 也好, 更具視覺效果
[x] Found a way to attach displayable source code to code objects: introduce class Comment, similar to class Word
but doing nothing (phaseA/phaseB take care of that); it just carries a comment, comma(Comment('lalalal')),
into the dictionary where it lies waiting for the see command to use it.
OK py: comma(Comment("lalala"))
OK here
OK .
637 OK 636 @ .
lalala OK 636 @ type . --> <class 'projectk.Comment'>
OK 636 @ .
lalala
OK 636 @ execute -->
Failed to run <Word 'execute'>: must be str, not Comment
Continue, Debug, or Abort? [C/d/a] a
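A guess at the minimal shape of that Comment class (the real one went into projectk.py; the attribute name here is an assumption):
    class Comment:
        # inert dictionary entry: phaseA/phaseB skip it, only the 'see' command reads it
        def __init__(self, comment):
            self.comment = comment
        def __str__(self):
            return self.comment
        def __repr__(self):
            return self.comment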
[x] modify phaseA phaseB to support Comment class
--> done!
[x] modify ::, :>, </py>, and </pyV> to add comment
[x] 目前 literal 仍被當一般 function 用 dis.dis() 顯示 --> 改成顯示 literal
OK 339 @ . # 已知 339 處是個 literal function
<function xt.<locals>.f.<locals>.literal at 0x000001ED9B6579D8> OK 339 @ :> __name__ .
OK 339 @ :> str . # 印出 readable 的方法
Literal: pop(). <class 'str'> OK
--> 可以修改 toString 了
==> see 終於完成了!!!
[x] 其實 __doc__ attribute 就是用來放說明文字的 . . .
--> 錯!
Failed to run <Word '</py>'>: 'code' object attribute '__doc__' is read-only
Continue, Debug, or Abort? [C/d/a]
可是我試過了呀!? 如下:
00035: RET (<class 'NoneType'>)
00036: Literal: \\n|\\r <class 'str'>
00037: RET (<class 'NoneType'>)
00038: lambda:push(eval(vm.greeting())) (<class 'projectk.Comment'>)
00039: (<class 'function'>)
7 0 LOAD_GLOBAL 0 (push)
2 LOAD_GLOBAL 1 (eval)
4 LOAD_DEREF 0 (eval_code)
6 CALL_FUNCTION 1
8 CALL_FUNCTION 1
10 RETURN_VALUE
OK 39 @ .
<function xt.<locals>.<lambda> at 0x0000017E8D269598> OK
OK 39 @ dir .
['__annotations__', '__call__', '__class__', '__closure__',
'__code__', '__defaults__', '__delattr__', '__dict__',
'__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__get__',
...snip...]
OK 39 @ :> __doc__ .
None
OK 39 @ :: __doc__="abc"
OK 39 @ :> __doc__ .
abc OK
這是 : version py> vm.greeting() ; // ( -- revision ) print the greeting message and return the revision code
compile() 出來的 eval_code, exec_code 的 __doc__ 都是 read-only, 但是
包過一層 lambda 之後就可以編寫了。 <------ 真相大白!!
--> </py> 直接 comma(exec_code) 實在沒有好處, 犧牲了 __doc__ 又
迫使 phaseB 無謂地變得複雜。
--> [x] 改掉!
[x] these lines are strange,
"" value description ( private ) // ( -- "text" ) description of a selftest section
[] value expected_rstack ( private ) // ( -- [..] ) an array to compare rstack in selftest
[] value expected_stack ( private ) // ( -- [..] ) an array to compare data stack in selftest
0 value test-result ( private ) // ( -- boolean ) selftest result from [d .. d]
[] value [all-pass] ( private ) // ( -- ["words"] ) array of words for all-pass in selftest
the "( private )" become prefix of their word.help !
--> value command gets stack diagram ?
--> ( command 看到 last 沒有 help 就把後續的 (...) comment 加進去了! 應該限制
compiling state 才這麼做。
[x] *** debugging, OK now. RI: constant and value were in trouble due to that I
changed the Comment word and the way to compile code objects.
[x] python shell and eforth 互相參考手上的資料
>>> peforth.main()
OK 0 constant shell # peforth 定義的變量
OK exit
# 從外面把 globals() 給它
>>> getattr(peforth.vm,'forth')['shell']=globals()
>>> peforth.vm.forth
{'obj2dict': <function obj2dict at 0x000002C8D8F5B1E0>,
'description': '', 'expected_rstack': [], 'expected_stack': [],
'test-result': 0, '[all-pass]': [],
'shell': {'__name__': '__main__', '__doc__': None, '__package__': None,
'__loader__': <class '_frozen_importlib.BuiltinImporter'>,
'__spec__': None, '__annotations__': {}, '__builtins__': <module 'builtins' (built-in)>,
'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}}
>>> peforth.main()
OK shell .
{'__name__': '__main__', '__doc__': None, '__package__': None,
'__loader__': <class '_frozen_importlib.BuiltinImporter'>,
'__spec__': None, '__annotations__': {},
'__builtins__': <module 'builtins' (built-in)>,
'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}
OK
# 從外面 DOS copy-paste 進來,一氣呵成 (不要 indent, 用 block mode)
python
import peforth
peforth.vm.dictate('0 constant shell')
peforth.vm.dictate('// ( -- dict ) 最外層 python interpreter 的 globals()')
getattr(peforth.vm,'forth')['shell']=globals()
peforth.main() # 從 python interpreter 切換進入 peforth
\ 進入了 peforth interpret state
<accept> \ 從 terminal 收取跨行 input lines
<py>
import sys
push(sys)</py> constant sys
// ( -- sys ) The sys module. Try: sys py: help(pop())
</accept> \ ( -- string T|f ) 從 terminal copy-paste 進來的 string
[if] tib.insert help sys [then]
[x] examples tools utilities goodies 範例 栗子 例子
\ 列出所有的 code words
<py> [w.name for w in words['forth'][1:] if 'code' in w.type] </pyV>
\ 列出所有的 selftest passed words
<py> [w.name for w in words['forth'][1:] if 'pass'==getattr(w,'selftest',False)] </pyV> . cr
\ 列出所有 immediate words
<py> [w.name for w in words['forth'] if getattr(w,'immediate',False) ] </pyV> . cr
\ 把尾巴 2 個 TOS 切出來成為單獨的 list (array)
( -2 ) >r py: t,vm.stack=stack[rtos(1):],stack[:rpop(1)];push(t)
--> slice
\ Execute DOS command
OK <py> exec('import os',globals(),globals())</py> # import the os module
OK py: os.system('dir')
Volume in drive C is Windows
Volume Serial Number is 2EA4-3202
Directory of c:\Users\hcche\Documents\GitHub\peforth
2017-08-23 09:31 <DIR> .
2017-08-23 09:31 <DIR> ..
2017-07-31 20:35 65 .gitattributes
2017-06-25 13:31 18,226 voc.f
2017-08-25 13:03 <DIR> __pycache__
10 File(s) 178,951 bytes
3 Dir(s) 264,579,960,832 bytes free
OK
# But after <py> os.system(r"cd c:\Users\hcche\Documents\GitHub\ML\WH300")</py>
the peforth working directory is not changed. It changes only the temperary shell.
\ copy 以下 comment (用 np++ column mode) 從 DOS box Ctrl-V 一路跑起來
<comment>
python
import peforth
peforth.vm.dictate('0 constant shell')
peforth.vm.dictate('// ( -- dict ) 最外層 python interpreter 的 globals()')
getattr(peforth.vm,'forth')['shell']=globals()
peforth.main() # 從 python interpreter 切換進入 peforth
\ 進入了 peforth interpret state
<accept> \ 從 terminal 收取跨行 input lines
<py>
import sys
push(sys)</py> constant sys
// ( -- sys ) The sys module. Try: sys py: help(pop())
</accept> \ ( -- string T|f ) 從 terminal copy-paste 進來的 string
[if] tib.insert help sys [then]
</comment>
\ DOS command line one-liner to print the path environment variable
c:\Users\hcche\Desktop>python -m peforth s' push(os.get_exec_path())' compyle execute (see) bye
[x] <accept> <py> does not work when unless putting <py> to next line <---- problem
--> rest of the line after <accept> should be the first line of the multiple lines
[x] OK include c:\Users\hcche\Documents\GitHub\ML\WH300\wh300.f
C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
Failed to run <Word 'sinclude'>: pop from empty list
Continue, Debug, or Abort? [C/d/a] a
OK
--> possibly because rstack are to be used to return while reset() ( stop command )
clears the rstack. --> 應該是猜對了。 stop command 只能中斷 outer loop 不能把 rstack 清掉!!
[x] let <accept> <text> auto indent. Use spaces before </accept> </text> as the common strip.
--> study <text> </text> 直接用 BL word 把 </text> 之前的 spaces 都忽略掉了, 這裡要改一下。
--> code test push(nextstring('[^ ]')) end-code test 123 得到:
0: {'str': ' ', 'flag': True} (<class 'dict'>)
1: 123 7Bh (<class 'int'>)
用來取得 </text> 之前的 spaces --> 這只是一法,也不太好。
--> 不如取所有 lines 的 leading spaces 之最大公因數,一律刪除就對了。
1. 切成 lines in an array
</text> :> splitlines() ( [lines] )
2. 算出每行的前導 spaces 個數
len - lstrip
OK s" abc" py> len(pop()) tib.
s" abc" py> len(pop()) \ ==> 7 (<class 'int'>)
OK s" abc" :> lstrip() py> len(pop()) tib.
s" abc" :> lstrip() py> len(pop()) \ ==> 3 (<class 'int'>)
OK
3. 取最小值,
OK py> min([1,2,3]) tib.
py> min([1,2,3]) \ ==> 1 (<class 'int'>)
OK
4. 每行都去除這麼多前導 spaces
[ e for e in m]
cls dropall <accept>
<text>
line1
line2
line3
line4
line5
</text> constant lines
</accept>
drop tib.insert
lines :> splitlines() constant [lines]
<py> map(lambda x:len(x)-len(x.lstrip()),vm.forth['[lines]'])</pyV>
constant leading-spaces // ( -- map ) 只能用一次!
\ 檢查 leading-spaces 有兩種方法,後者才漂亮
\ <py> [i for i in vm.forth['leading-spaces']]</pyV> tib. \ check leading-spaces
\ leading-spaces py> list(pop()) .
\ OK leading-spaces py> list(pop()) . # 如果 map 不大這個可以考慮
\ [12, 16, 16, 16, 16, 8] OK
\ OK leading-spaces py> list(pop()) . # map 之類的 iterator 都不能 rewind/reset
\ [] OK
leading-spaces py> min(pop()) constant common-indent
[lines] common-indent <py> [i[tos():] for i in pop(1)]</pyV> nip constant [result]
result py> "\n".join(pop()) constant result // ( -- string ) the cooked multi-lines string
: -indent ( multi-lines -- cooked ) // Remove common indent of the string
:> splitlines() ( [lines] )
<py> map(lambda x:len(x)-len(x.lstrip()),tos())</pyV> ( [lines] map[^spaces] )
py> min(pop()) ( [lines] indent )
<py> [i[tos():] for i in pop(1)]</pyV> nip ( [result] )
py> "\n".join(pop()) ;
code -indent
lines = pop()
array = lines.splitlines() # [lines]
spaces = map(lambda x:len(x)-len(x.lstrip()),array) # [spaces]
indent = min(spaces) # number of common indent
cut = [i[indent:] for i in array] # [cuted lines]
push("\n".join(cut)) end-code
// ( multi-lines -- cooked ) Remove common indent of the string
bingo! it works!
[x] don't need to use map in -indent, use [f(i) for i in lines.splitlines()]
should be enough --> Yes! The following two lines are equivalent:
spaces = map(lambda x:len(x)-len(x.lstrip()),array) # iterator
spaces = [len(x)-len(x.lstrip()) for x in array] # list
[x] Start to use peforth for the wh300 project . . .
用 peforth 來實現 wh300
第一個好消息就是 import module 變成 forth word 成功了!!
<py>
import numpy as np
push(np)
</py> constant np // ( -- numpy ) The numpy module
OK np .
<module 'numpy' from 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\numpy\\__init__.py'> OK
OK
--> import to globals() is the best way. The above method is interesting but not the best.
--> Done ! wh300.f works fine now.
[x] -indent 很聰明地 " "*100 的花招把 </text> 之前的線索給毀了!!! 目前變成過度 indent.
--> 過度 indent 修好了, constant 的 runtime 又出問題。因為是 runtime, root cause 很難找。
Root cause : 下面 lambda 的 code 內縮了,應該不要。所以是 -indent 有問題。
str__', '__subclasshook__'] OK py> dictionary[456].__doc__ .
lambda:exec(
source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
) OK 123 constant x
Failed to run <function xt.<locals>.<lambda> at 0x000001C0C39B61E0>: unexpected indent (<string>, line 2)
Continue, Debug, or Abort? [C/d/a] a
OK
--> 對照 ok of 'see constant' 可見得上面問題版的 lambda source code 裡有多的 indent
------------ Definition in dictionary ------------
00456: lambda:exec(
source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
)
--> 先用醜版面過關取得完整功能, 再來對付它。
--> interpret state ok, try compile --> ok too --> so what's the problem..it's clear
當 <py> 之後跟著兩個 space 時其實這個實驗就已經複製到問題了, 厲害的是要到 test 的
runtime 才會執行 lambda 從而觸發到 unexpected indent ... 難怪這麼難抓!!
: test
<py>
a=1
b=2
c=3
</py> ;
--> breakpoint 在 -indent 當 last==constant 時
code -indent
if debug and last().name=='constant': pdb.set_trace() <--- 斷到了
...snip....
--> constant 改到有問題的 <py>..</py> 版本 --> 看看這時 -indent 收到啥
|(Pdb) p lines
|' \n source = \'\\tpush(getattr(vm,"{}")["{}"])...snip...
^---- 這個 space 就是問題所在了 !!!! 真難找。
--> Root cause: in constant source code, after the <py> an extra space was there!
--> See Ynote : "peforth -indent command indent 問題探討-- 成功了! 扫描_20170828180311 _peforth_"
[X] reset() 能不能強一點? panic() 好幾次很煩....也許有意義?
[x] compyle 裡用到 lambda 來產生 function 有問題!
# 這個可以!
>>> s = '''
... dd = {'a':123,'b':456}
... print(dd)
... v = [dd[i] for i in dd] # 取得所有的 value of a dict
... print(v)
... '''
>>> exec(s) # <----------- 直接執行 exec() 很好,沒問題
{'a': 123, 'b': 456}
[123, 456]
--
# 經過 lambda 之後 local name space 就有怪現象了
# 如下不行了, 這是經過 lambda 之後產生的結果。 compyle command 不要用 lambda . . . .
... s = '''
... dd = {'a':123,'b':456}
... print(dd)
... v = [dd[i] for i in dd] # 取得所有的 value of a dict
... print(v)
... '''
>>> f = lambda:exec(s)
>>> f()
{'a': 123, 'b': 456}
NameError: name 'dd' is not defined
>>>
--> compyle 裡改用 genfunc(source) 來產生 function
----- this snippet works fine ------------
<py>
# python does not support annoymous function. But it supports closure,
# so we can recover it. genfunc("body","args") returns a function which
# is composed by the given source code and arguments.
def genfunc(body,args):
local = {}
source = "def func({}):".format(args)
# args is something like "", or 'x, y=123,z=None'
if body.strip()=="":
source = source+"\n pass\n";
else:
source = (source+'\n{}').format(body)
try:
exec(source,globals(),local)
except Exception as err:
panic("Failed in genfunc(body,{}): {}\nBody:\n{}".format(args,err,body))
local['func'].__doc__ = source
return local['func']
push(genfunc)
</py> constant genfunc // ( -- func ) function generater genfunc(body,args)
genfunc <py> pop()(' print("hi")',"")</pyV> :: ()
\ ==> hi
( arguments ) s" x,y"
( body ) <text>
result = x**2 + y**2
print(result)
</text> -indent
genfunc :> (pop(),pop()) constant f // ( -- func ) f(3,4) prints 25 which is 3^2+4^2
f :: (3,4)
\ ==> 25
----- this snippet works fine ------------
結果:
^D
hi <--- 正確,正確
25
Multiple-line mode is on, Ctrl-D switches it off.
OK
--- genfunc() 進了 project-k kernel -----------
( name ) s" lalala"
( arguments ) s" x,y"
( body ) <text>
result = x**3 + y**3
print(result)
</text> -indent
py> genfunc(pop(),pop(),pop()) constant f f :: (3,4)
# it works fine !!
--- 有問題要到 runtime 才會發現, 故 selftest 很重要 -----------
( name ) s" lalala"
( arguments ) s" x,y"
( body ) <text>
result = x*y
print(resultttttttt)
</text> -indent
py> genfunc(pop(),pop(),pop()) constant f
\ 到這裡都沒問題, 以下執行了才發現問題,而且 error message 線索差很遠
OK f :: (1,2)
Failed in </py> command: name 'resultttttttt' is not defined
Body:
pop()(1,2)
Continue, Debug, or Abort? [C/d/a]
----- it works fine --------------
[x] 改用 genfunc() 取代 lambda 之後, indent 習慣又全變了, 因為 function body
一定要 indent 而與原來的 exec(body) 相反。 共有 <py> py> py: :: :> 這些
東西受影響, 剩下 :: :> 要改 --> all done.
[x] Now without lambda (genfunc instead) test the original problem:
<py>
dd = {'a':123,'b':456}
print(dd)
v = [dd[i] for i in dd] # 取得所有的 value of a dict
print(v)
</py>
results:
{'a': 123, 'b': 456}
[123, 456] <---------------- Pass!!
[x] code compyle
execute('-indent');execute('indent')
若用 dictate('-indent indent') 則無效, 何故?
--> 以下實驗卻又都 ok !
--> RI: 因為當時在 compiling state !! 用 dictate() 的結果是把兩個 words
compile 進去了,既沒效果又出別的問題。
==> 用 dictate() 問題比較多,不能放心亂用。
這兩行 debug trick 技巧留作紀念:
if tos().find('vm.greeting')!=-1: pdb.set_trace()
dictate('-indent indent') # 奇怪, dictate 就不行???
[x] (forget) in trouble now
OK (forget)
Failed to run <function compyle_anonymous at 0x0000018230B22400>: 'Word' object has no attribute 'cfa'
--> 這問題自動好了
[x] improve the greeting when imported from python interpreter
OK py> sys.argv .
['peforth.py'] <------- run from DOS box
>>> import peforth
OK py> sys.argv .
[''] <----------------- run from python interpreter, need more help messages
[x] 整理 try - exception in peforth.f
# 從 python interpreter 裡用 genfunc() 產生 function
>>> f = peforth.vm.genfunc(" 1/0",'','test2')
>>> f
<function test2 at 0x000001B42DB13E18>
# 測試看看,確實會出錯
>>> f()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 2, in test2
ZeroDivisionError: division by zero
>>> f
<function test2 at 0x000001B42DB13E18>
# 直接 compile 進 peforth 的字典
>>> peforth.vm.comma(f)
# 進到 peforth 一執行 error message 又是 </py> 發的!
>>> peforth.main()
OK here 1- @ :: ()
Failed in </py> (compiling=False): division by zero
Body:
pop()()
Continue, Debug, or Abort? [C/d/a] a
# 檢查看看,他確實是 test2
OK here 1- @ :> __doc__ .
def test2():
1/0 OK
--> 探討原因,似乎「誰執行的,error message 就打給誰」,這樣應該資訊比較充分。
:: 裡面 interpret state 是 </py>, compiling state 則是 compyle --> 試試看
OK here 1- @ constant f \ 取得 test2 function
OK : test f :: () ; \ 故意讓 :: 的 compiling state 表演
OK test \ 一執行,報錯的變成 phaseB()
Callable in phaseB <function compyle_anonymous at 0x000001CC3771D1E0>: division by zero
Body:
def compyle_anonymous():
pop()()
Continue, Debug, or Abort? [C/d/a] a
--> ^^^^^^^--- 這個 Body information 似乎沒啥用,好像錯了?其實沒錯。
--> 如下,這是 f :: () 這種寫法的結果,沒錯,它的 Body 當然顯示不出 f 的 source code
------------ Definition in dictionary ------------
00711: f __str__ (<class 'projectk.Word'>)
00712: def compyle_anonymous():
pop()() (<class 'function'>)
2 0 LOAD_GLOBAL 0 (pop)
2 CALL_FUNCTION 0
4 CALL_FUNCTION 0
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00713: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
--> 正確的寫法是 :
OK : test2 [ f , ] ;
OK test2
Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
Body:
def test2(): <------------------ 果然顯示出了 除0 的 source code
1/0
Continue, Debug, or Abort? [C/d/a] a
OK see test2
{
... snip...
"cfa": 715
}
------------ Definition in dictionary ------------
00715: def test2():
1/0 (<class 'function'>)
2 0 LOAD_CONST 1 (1)
2 LOAD_CONST 2 (0)
4 BINARY_TRUE_DIVIDE
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00716: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
OK
--> 即使在 interpret state 也不一定讓 </py> 來報錯(描述不精確),如下:
OK f py: execute(pop())
Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
Body:
def test2(): <----------------- 直接就看到真正的 source code
1/0
Continue, Debug, or Abort? [C/d/a]
--> try: exception: 以後繼續改進。。。。。。
[x] multiple lines of tib. are not showing correctly.
--> try test.f
111 tib.
222 tib.
333 tib.
--> I've got it. From clipboard is ok, from accept2 is not.
OK ^D
111 tib.
222 tib.
333 tib.
^D
111 \ ==> 111 (<class 'int'>)
111 \ ==> 222 (<class 'int'>)
111 \ ==> 333 (<class 'int'>)
OK
--> fixed
[x] RET at end of dictionary is expected but missing <--- problem!!
--> improve (dump) d dump --> ok now
[x] Oh, my God! peforth can be a debugger or 內視鏡 of python:
<py>
any python code; peforth is available e.g. push()
push(123);import peforth;peforth.main() # enter peforth break point, wonderful !!
</py>
--> The way to enter peforth interpreter is not very good, though it's clear.
--> ok now, the breakpoint usage is :
push(locals());ok('111>>')
==> python -i already drops back to the python interpreter, which allows static analysis of the results of a run.
Wrote endo.py (see my ynote) as an alternative to pdb, for examining program state at a breakpoint.
[x] 手動 install peforth 的方法 see my ynote
[x] peforth package 裡面 __init__.py 就是 peforth.py 也就是 __main__.py
[x] 這時候要解決的是 peforth.f , quit.f 的 path , 用 __path__[0] 即可。
[x] import projectk.py as vm 要改成 from . import projectk.py as vm 把 path 指定清楚
[x] projectk.py 裡面用 vm = __import__(__name__) 在 package 裡不適用
改由 __init__.py 來填 vm.vm = vm 即可。
==> 成功了 !
手動安裝
========
1. 把本 project 的四個檔案 projectk.py quit.f peforth.f __main__.py 全部 copy 到如下新創建的 folder: c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth
2. 把其中 __main__.py 多 copy 一份成 __init__.py 即可。
執行 peforth 有四個方式
=======================
1. 從 project folder 下執行 python __main__.py OK 後打 : test .’ hello world!’ cr ; test 印出 hello world! 打 bye 離開。
2. 從 project folder 外面執行 python peforth OK 後打 : test .’ hello world!’ cr ; test 印出 hello world! 打 bye 離開。
3. 安裝好 peforth package 之後,任意 folder 下執行 python -m peforth 後同上。
4. 安裝好 peforth package 之後,任意 folder 下執行 python 然後 import peforth 然後按照指示打 peforth.main() 進入 peforth 後同上。
[x] why peforth? why endo.py? 一個 object 用來保存被觀察的 locals 不就好了?
1. indent 自由
2. 現成的 tool, forth 可以記住很多命令, 複雜的 command 可以臨時組合而成
[x] peforth 既然可以是個 python debug 學習工具,拿 peforth 來當 breakpoint 就要盡量簡單。
--> The REPL, peforth.main(), renamed to peforth.ok()
REPL, or Read-Eval-Print-Loop.
--> peforth.ok(prompt='OK ',loc={}) for user to specify the prompt and giving the locals
at the moment.
--> at the point ok() started, TOS is the tuple with information from the caller.
The data stack was supposed to be empty, here after it won't be.
--> The TOS provides the prompt, the locals
[x] debug command 不要了, 會跟 py> debug which is vm.debug 撞名,沒必要增加這個問題。
[X] I found a python problem!!
False==0 is True, False<=0 is True, False<=0.1 is True
False<0.0001 is True, False<-0.1 is False
這是在引用 debug 來篩選哪些 breakpoint 做不做用時遇到的問題。debug 初值為 False 結果
debug<=33 竟然是成立的!
2019/11/25 10:26:06
[x] ." a" prints an extra space <--- problem
RI: dot . command 早期為了 debug 好看,有多印一個 space 可以不要了。
[x] peforth.path indicates the home directory where peforth.f is
[x] IDLE generates keyboardinterrupts
try-except can fix it http://effbot.org/zone/stupid-exceptions-keyboardinterrupt.htm
--> Rewrote accept with a try-except check so that the KeyboardInterrupt raised when IDLE resizes
its window is no longer thrown out unexpectedly.
--> The resize-window KeyboardInterrupt is fixed, but Ctrl-D cannot be used; for multi-line
input use <accept> ... </accept> tib.insert instead. (See the sketch below.)
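The guard is roughly the following; accept_sketch is an illustration only, the real accept word differs:
    def accept_sketch():
        try:
            return input(), True
        except KeyboardInterrupt:
            # IDLE raises spurious KeyboardInterrupts (e.g. on window resize); swallow them
            return "", False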
[x] peforth 的 version 在 whl 打包時要如何統一定義來源?
本文 "Single sourcing the version" 提供多種選擇。
https://packaging.python.org/guides/single-sourcing-package-version/#single-sourcing-the-version
我選用了 version.txt 檔案的方法,好像與 jeforth.3we 類似。
peforth/version.txt 只有一行 python statement 讓相關的單位都來參考它。
[X] 因此今後 projectk.major_version 就留在 projectk.py 裡沒有直接用到了。
__version__ = "1.02"
試出適合 setup.py 使用的 experiments 如下:
dropall cls
<accept>
<py>
loc = {} # locals
with open(v('package-directory')+"peforth\\"+"version.txt") as fp:
exec(fp.read(),{},loc )
# later on we use: loc['__version__']
push(loc)
print('loc[\'__version__\'] is ',loc['__version__'])
</py>
</accept>
tib.insert
.s
實際在 setup.py 裡的程式:
loc = {} # locals
with open("peforth/version.txt") as fp:
exec(fp.read(),{},loc ) # later on we use: loc['__version__']
version=loc['__version__'] # Refered in setup(...) as an argument
在 peforth/__main__.py 裡的程式:
# Get version code from peforth/version.txt for whl package
# to see the single source of version code.
exec(readTextFile(path + "version.txt"),{},locals())
vm.version = __version__
[x] Improve (see) to see source code from project-k
OK py> reset (see) <--- no good so far
{
"__class__": "function",
"__module__": "peforth.projectk"
}
py> reset.__doc__ tib. \ ==> None (<class 'NoneType'>)
py> reset.__code__ tib. \ ==> <code object reset at 0x000001CE712C2810, file "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 42> (<class 'code'>)
__code__ is the chance to improve.
[x] (see) only sees class and module, that can be improved to include some more e.g. __code__
==> circular reference detected 無法解決, 暫用 .members .source 應付。
[x] 決心用 3hta 寫一個 pykb.f 專門用來給 peforth 當 keyboard input to support multiple lines
find the process ID of peforth for sendkey
s" where name like '%python%'" see-process
--> 已經完成 include pykb.f 之後,用 {F7} 把 inputbox 下給 python
[x] T550 上 activate-shell 無效,sendkeys 還好。但是 git.f 卻又好好的。
--> 似乎從 __main__.py 直接執行的 python 是切不過去的,經由 DOS Box 跑起來的才可以。
> include git.f \ 對照看看為何人家可以?
> s" where name like '%python%'" list-them
python.exe:8212 \ 查出 python (直接 double click __main__.py 起來的)
> WshShell :: appActivate(8212)
> launch-git-shell
> shellId . ==> 1608 \ 查出 git shell
> WshShell :: appActivate(1608) \ 這個可以切過去
> WshShell :: appActivate(8212) \ 這個就不行
--> 如果退出 python 則該 DOS Box 能 activate 嗎?
> s" where name like '%cmd%'" list-them
TOTALCMD64.EXE:20780
cmd.exe:22848
cmd.exe:9556
> WshShell :: appActivate(20780) 可以切到 total commander
> WshShell :: appActivate(22848) 可以切到剛退出 peforth 的 DOS Box
> WshShell :: appActivate(9556) 這個不知是啥,切不過去!
用 see-process 看進去,竟然可能是 Google Chrome 的東西
string Name; cmd.exe
uint32 ProcessId; 9556
string Caption; cmd.exe
string CommandLine; C:\WINDOWS\system32\cmd.exe /d /c "C:\Users\hcche\AppData\Local\youdao\Dict\Application\stable\text_extractor_host.exe" chrome-extension://aohddidmgooofkgohkbkaohadkolgejj/ --parent-window=0 < \\.\pipe\chrome.nativeMessaging.in.53dc641bdd08e0c9 > \\.\pipe\chrome.nativeMessaging.out.53dc641bdd08e0c9
string CreationClassName; Win32_Process
--> 所以切不到某些 process 是有的,何解?
進一步研究發現,這個 python 是從 Anaconda3 run 起來的
> s" where name like '%python%'" list-them
python.exe:20092
> WshShell :: appActivate(20092)
string Name; python.exe
uint32 ProcessId; 20092
string CommandLine; C:\ProgramData\Anaconda3\python.exe "C:\Users\hcche\Documents\GitHub\peforth\__main__.py"
--> Not root cause. 即使 Anaconda 的 python 也能切過去,只要。。。
--> 把 Title 改成 peforth 吧!看看是否改得到所在的 cmd or powershell
DOS command c:\> title titlename can change the doxbox title but it's not
a process attribute so it doen't help.
--> 所以答案是: 直接跑 __main__.py 或經過 dosbox 都可能行或不行,
process ID 可以用 nnnn to processid 指定的,就算了吧!
--> 多印些 info 讓 user 自己手動設 processid, Done!
[x] improve .members --> __class__ attribute can easily be circularly deep and long
m py> inspect.getmembers(pop()) py> str(pop()) tib.
[x] try to str(obj) then json.loads(string) and then json.dumps
--> str() generates non-json 不行!
--> 暫時放棄了
[x] C:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tensorflow6_session.f
How can all of a Python section's local variables be turned into forth values in one go?
l :> keys() tib. \ ==> dict_keys(
['result2', 'result', 'sess', 'product', 'matrix2', 'matrix1', 'tf']
) (<class 'dict_keys'>)
--> Need to be able to create constants programmatically --> refactored constant into (constant)
: (constant) ( n "name" -- ) // Create a constant
(create) <py>
source = ' push(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
</py>
reveal ;
OK 123 char x (constant)
OK x . ==> 123 OK
--> Worked on the first try! Can it be used inside a colon definition?
: test 234 char y (constant) ;
test
y . ==> 234 works!
--> With (constant) it should be possible to generate all of the locals() automatically
==> ok now! vm.outport(loc) defined in quit.f
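My rough reading of what outport(loc) boils down to; a hypothetical sketch only, the real definition is in quit.f and may differ:
    def outport(loc):
        # turn every name in a locals() dict into a peforth constant of the same name
        for name, value in loc.items():
            push(value)
            dictate("char {} (constant)".format(name))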
[x] Install peforth from source
---- 早期 (1.22 版以前) 不懂得用 python setup.py install 時的替代方法 ----
a command to update the peforth module
@ c:\Users\...\Python36\Lib\site-packages\peforth\..
Get the path
import os.path as ospath
# py> pdb :> __file__ tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\pdb.py (<class 'str'>)
# py> ospath.dirname(pdb.__file__) tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib (<class 'str'>)
# py> ospath.split(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib', 'pdb.py') (<class 'tuple'>)
# py> ospath.splitdrive(pdb.__file__) tib. \ ==> ('C:', '\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
# py> ospath.splitext(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb', '.py') (<class 'tuple'>)
# py> ospath.splitunc(pdb.__file__) tib. \ ==> ('', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
py> ospath.dirname(pdb.__file__)+"\\site-packages\\peforth\\" ( targetPath )
getenv(key, default=None)
Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str.
getenv compare with py> ospath.dirname(pdb.__file__)
if same then proceed the patch program to copy all files
if not then warning and stop
算了,直接 copy 就好了
------ update.bat ------
set pythonlib=C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib
copy -y version.txt %pythonlib%\site-packages\peforth
copy -y projectk.py %pythonlib%\site-packages\peforth
copy -y __main__.py %pythonlib%\site-packages\peforth
copy -y __init__.py %pythonlib%\site-packages\peforth
copy -y peforth.f %pythonlib%\site-packages\peforth
copy -y quit.f %pythonlib%\site-packages\peforth
------ ------ ------ ------ ------
[x] 發現 pip help install 列出了 pip install 的種種用法。
update.bat 直接從 project directly update peforth package 到
lib\site-packages\peforth 的方式太暴力了。
--> Try this example from pip help install :
pip install [options] [-e] <local project path> ...
[X] 有待研究 14:33 18/05/21 v1.16 試用結果,失敗:
c:\Users\hcche\Documents\GitHub>pip install -e peforth
Obtaining file:///C:/Users/hcche/Documents/GitHub/peforth
Missing build time requirements in pyproject.toml for file:///C:/Users/hcche/Documents/GitHub/peforth: 'setuptools' and 'wheel'.
This version of pip does not implement PEP 517 so it cannot build a wheel without 'setuptools' and 'wheel'.
Installing build dependencies ... done
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\hcche\Documents\GitHub\peforth\setup.py", line 9, in <module>
with open("peforth/version.txt") as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'peforth/version.txt'
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in C:\Users\hcche\Documents\GitHub\peforth\
c:\Users\hcche\Documents\GitHub>
[x] Ynote: "研究 install peforth from source 的方法" 已經成功。
[/] jump to 遙遠的下面 "---- 2018.12.15 懂得用 python setup.py install 需要修改 ----"
-
[/] 螢幕編輯器
os.get_terminal_size(...)
Return the size of the terminal window as (columns, lines).
[x] (forget) 有 error
'Word' object has no attribute 'cfa' <-- 用 getattr(obj,name,None) 即可。
[x] peforth 1.3 uploaded to pypi. 準備來寫 wiki 介紹怎麼
應用 peforth 來學習 TensorFlow.
--> Done https://github.com/hcchengithub/peforth/wiki/Example-4-Examine-a-Machine-Learning-exercise
[x] 繼續完成 peforth.f 的 selftest 元件
--> string 轉譯成 array [d ... d] [r ... r] 要用到
: test2 char 123,456 s" [{}]" :> format(pop()) py> eval(pop()) ;
--> String.indexOf 改成 String.find
\ Selftest 要 redirect print() 方便取得並檢查螢幕輸出的內容。
\ 這是個 redirect print() 的有效範例
\ Selftest 要 redirect print() 方便取得並檢查螢幕輸出的內容。
\ 改寫成輸出到 buffer. See http://www.cnblogs.com/turtle-fly/p/3280519.html
<accept>
py> [""] value screen-buffer // ( -- 'string' ) Selftest screen buffer
<py>
class Screenbuffer:
def __init__(self,buf):
self.stdoutwas=sys.stdout
self.buffer=buf
def write(self, output_stream):
self.buffer[0] += output_stream
def view(self):
self.stdoutwas.write(self.buffer[0])
def reset(self):
sys.stdout=self.stdoutwas
vm.Screenbuffer=Screenbuffer
# redirection
sys.stdout=Screenbuffer(vm.forth['screen-buffer'])
# print to screen buffer
sys.stdout.stdoutwas.write("-------1111-----\n")
print( 'hello')
print( 'world')
sys.stdout.stdoutwas.write("-------2222-----\n")
# view screen buffer
sys.stdout.view()
# reset
sys.stdout.reset()
outport(locals())
</py>
</accept>
tib.insert
[x] 探討,整理,討論幾種產生 function 或執行 inline python code 的方法
1. projectk.py genxt() 有 __doc__ 專為 code word xt 硬性 _me argument
2. projectk.py genfunc() 有 __doc__ 一般用途 name args body
3. peforth.f compyle 產生一般用途的 annonymous function 沒有 args
4. <py>...</py> 前後都是 immediate 正常使用沒問題。但若想先組合好 source code 再讓
</py> or </pyV> 去執行,就有變化了。以下 try, try2 兩個都是有意義的、可以的
OK : try char 123 [compile] </pyV> ;
OK try .
123OK
OK : try2 [ char 123 ] </pyV> ;
OK try2 .
123OK
但是下面這個其實是不知所云的:
OK : try3 char 123 </pyV> ;
其結果也是莫名其妙的:
Error! try3 unknown.
OK
5. 直接用 exec(), eval() 執行臨時組合出來的 string, e.g. [r [d [p 的定義。
6. 直接用 compile(), genfunc() 可能不會有,吧?
[x] 有很嚴重的 bug
OK : test <py> 123 </pyV> ;
OK see test
...snip...
------------ Definition in dictionary ------------
00784: def compyle_anonymous():
push(123 ) (<class 'function'>)
2 0 LOAD_GLOBAL 0 (push)
2 LOAD_CONST 1 (123)
4 CALL_FUNCTION 1
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00785: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
OK
OK : test py> "abc" ;
reDef test
OK see test <----------- 沒反應!
OK ' test (see) <----------- 沒反應!
OK ' test . <----------- 沒反應!
--> 順序倒過來怎樣? 先試 : test py> "abc" ;
--> OK 一切正常
--> 再一個空的東西 : nothing ; --> 也正常!
--> 就是不能有 inline python? : test2 py> 1234 ;
--> OK 一切正常
--> 整個重來,那這樣呢?
: test <py> 123 </pyV> ; : test2 py> "abc" ;
--> 都 OK, 算了,不了了之。可能是寫 selftest 捅出來的哈哈題。
[x] Python and JavaScript cannot access by address, so how do we get
access by reference instead of access by value? (call by name, call by address, call by reference)
Yesterday, while writing the selftest, getting hold of the screen buffer required defining it as
py> [""] value screen-buffer // ( -- ['string'] ) Selftest screen buffer
rather than
py> "" value screen-buffer // ( -- 'string' ) Selftest screen buffer
otherwise that particular string can no longer be reached afterwards.
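The point in a few lines of plain Python (illustration, made-up names):
    def emit(buffer, text):
        buffer[0] += text        # mutates the shared one-element list
    screen = [""]                # wrap the string so callees can modify it in place
    emit(screen, "hello")
    print(screen[0])             # hello -- the caller sees the update
    # a bare string argument would only rebind the callee's local name and the
    # caller's variable would stay "" unchanged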
[x] 照著 MetaMoji 2017-9-17 15:15 的討論, 研究把 <selftest> sections 都 dump 出來的辦法。
--> 從 quit.f 裡一查即知, 應該是一行解決:
py> tick('<selftest>').buffer char peforth-selftest.f writeTextFile stop
--> 成功!
--> 此後就是改寫 peforth-selftest.f 而已。
[x] (constant) 遇到 reDef writeTextFile 會議常中止 --> 不能用 panic 警告,用 print 即可。
[x] About to release peforth v1.4
1. py:~ py>~ ::~ :>~ are so good to have.
2. selftest not completed yet but nice to have some
Release steps see Ynote: "Pack peforth to peforth.whl" > 打包步驟。
[x] v1.4 released, from now on v1.5
[x] 有了 argv 就不要有 greeting 也不要 reDef warnings.
--> 所以要提早取得 command line, quit.f 太晚了。
--> Done!
[x] PyPI README.rst 有辦法了 可查看 rst2html 也可以 convert from markdown
https://stackoverflow.com/questions/26737222/pypi-description-markdown-doesnt-work
--> 先用 pypandoc module 用轉的看看
py:~ import pypandoc; push(pypandoc)
constant pypandoc // ( -- module )
pypandoc :: convert('README.md','rst')
Failed in </py> (compiling=False): No pandoc was found:
either install pandoc and add it to your PATH or or call
pypandoc.download_pandoc(...) or install pypandoc wheels
with included pandoc.
--> OK pypandoc :> download_pandoc py: help(pop())
Help on function download_pandoc in module pypandoc.pandoc_download:
download_pandoc(url=None, targetfolder=None, version='latest')
Download and unpack pandoc
Downloads prebuild binaries for pandoc from `url` and unpacks it into
`targetfolder`.
:param str url: URL for the to be downloaded pandoc binary distribution for
the platform under which this python runs. If no `url` is give, uses
the latest available release at the time pypandoc was released.
:param str targetfolder: directory, where the binaries should be installed
to. If no `targetfolder` is give, uses a platform specific user
location: `~/bin` on Linux, `~/Applications/pandoc` on Mac OS X, and
`~\AppData\Local\Pandoc` on Windows.
OK pypandoc :: download_pandoc()
* Downloading pandoc from https://github.com/jgm/pandoc/releases/download/1.19.2.1/pandoc-1.19.2.1-windows.msi ...
--> download 半天下不來。。。很煩
--> http://pandoc.org/ 有 online converter , 分小段手動把 README.md 轉成 README.rst 吧!
pandoc.org 專門做各種文檔格式轉換。
--> Online reStructuredText editor http://rst.ninjs.org/
--> Yes!!
[x] Release v1.5
[x] Unify update.bat, setup.bat, setup.py, and so on
    --> copy the setup.bat from 3we
    --> done!
[x] Example in comment of the "words" command needs improvement
    --> Completely reworked; it now accepts a pattern
[x] Should alias inherit the original help & comment? Review the whole thing...
    --> Yes, but // is changed to append only when the existing help is "(...)"; otherwise it replaces.
[x] Bug: (see) unexpectedly leaves the given tos on the data stack if it's not a Word.
[x] Found that python should also be able to drive WshShell, so jeforth.3hta pykb.f may no longer be needed
[x] Record an elearning video introducing peforth
[ ] Wiki article introducing handy things like py: help(genxt), py> genxt .source, ' + . members, etc.
    --> Sigh, my level at the time... hard to say! Good things, but I was pushing my own toys
        without looking after the user's needs.
[X] Turn a web page, or at least 3hta, into peforth's input box to solve the multiple-line input problem.
    --> launch 3hta include pykb.f from within peforth
    --> Can python tell who launched it? If it could, the problem of Wsh.sendkey() not knowing where to
        send would be solved.
    --> There are ^D, ipython, jupyter notebook, etc. for this now.
[x] peforth.f selftest almost done, still many to go:
<py> [w.name for w in words['forth'][1:] if 'pass'!=getattr(w,'selftest',False)] </pyV> cr . cr
[x] Recently discovered bugs especially need to be added to the selftest
    --> (see) of a non-Word leaves the stack unclean --> every word with two code paths is suspect!
    --> Even things like words printing one extra print() at the end are key test points.
[/] readTextFile, writeTextFile seem unusable -- haha, the bug was that they got overridden by inport
[x] After display-off, display-on never comes back; it started suddenly, very strange. With no display it
    is hard to debug. If the content inside display-off is ." hello world" there is no problem.
    Only words help has the problem, and it is stuck inside words <== because words raised an error,
    display-on never ran. Just appending e after words fixes it. That means nexttoken() went wrong
    again: when executing from a file (rather than the console command line) it skips past the CRLF and
    grabs the next token, so the program goes haywire; worse, the token it grabbed was exactly
    display-on, so the screen was gone, hence hard to handle.
    Commands like this, whose nexttoken argument is optional, can all misbehave during the selftest.
    They should all get flagged there.
[x] Review every place that uses word and nexttoken()...
[x] python -i -m peforth version
an error shows up after exit ---> gone now! probably the WshShell win32 package
--> It's back again!!!
c:\Users\hcche\Documents\GitHub\peforth>python -i __main__.py
p e f o r t h v1.07
source code http://github.com/hcchengithub/peforth
*** Start self-test
*** End of peforth.f self-test ... pass
OK bye
Traceback (most recent call last):
File "__main__.py", line 129, in <module>
ok()
.... snip ......
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
raise SystemExit(code)
SystemExit: None
>>>
--> can repro? python -i __main__.py and bye ... Yes, repro'ed
--> OK py: exit() <--- can repro
OK py: exit(0) <--- repro too!!!
--> can it repro on a simplified ~.py instead of peforth?
--> Yes!! as simple as only one statement in test.py :
c:\Users\hcche\Downloads>type test.py
exit()
c:\Users\hcche\Downloads>python -i test.py
Traceback (most recent call last):
File "test.py", line 1, in <module>
exit()
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
raise SystemExit(code)
SystemExit: None
>>> exit()
c:\Users\hcche\Downloads>
--> It's not a problem. -i switch in command line normal behavior.
--> bye to use os._exit(0) instead of exit() can fix the problem.
[x] exit command to set vm.exit=True to stop the ok() in __main__.py
[/] add bp('prompt') in addition to ok() to avoid the unnecessary awkward
breakpoint instruction
--> Listen to users, don't assume. ok(prompt,loc,cmd) arguments are
all very useful.
[x] how to get vm's parent? so as to show greeting message differently
for different situations. i.e. ok() or peforth.ok() to enter peforth
interpreter
--> Not sure the original goal can be reached, but having the parent's data is always good.
[x] Bug found
c:\Users\hcche\Documents\GitHub\peforth>python -i -m peforth exit
OK
OK <=== python interpreter prompt expected
--> Because there are two vm.exit's!!!!
    Due to how the peforth module's __init__.py and __main__.py relate, they are not one and the same!!!
    The __main__.py inside the module is dedicated to -m execution; try rewriting it....
==> Simplify the whole way of running it; decided to give up running from the project folder. ---> 2019-05-11 restudied this with good results
    Keep only the two ways: import peforth or python -m peforth.
--> Since commit c3d7677 on Oct 8, 2017
[x] Because of the new file layout, automating setup.bat will be done later --> Done
[x] Tests before a Release v1.07
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. __main__.py [/] selftest [/] greeting [/] exit [/] bye
[x] 2. python __main__.py version drop [/] .s words [/] exit [/] bye
[x] 3. python -i __main__.py [/] selftest [/] greeting [/] exit [/] bye
[x] 4. python -i __main__.py version drop [/] .s [/] exit [/] bye
[x] 5. python -i -m peforth [/] selftest .s words exit
[x] 6. python -i -m peforth version drop
[x] 7. python import peforth
[/] selftest peforth.ok() .s words <--- no parent
[/] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. __main__.py [/] selftest [/] greeting [/] exit [/] bye
[x] 2. python -i -m peforth [/] selftest .s words exit bye
[x] 3. python -i -m peforth .' Hello World!!' cr bye
[x] 4. python import peforth
[x] 考慮 README.rst 改良
--> GitHub 版的先弄好
[x] hello world
Ynote: 草稿 peforth wiki article hello world _wiki_
[x] README.md --> README.rst by http://rst.ninjs.org
[x] These words should be moved into selftest section
'description', 'expected_rstack', 'expected_stack', 'test-result',
'[all-pass]', '***', 'all-pass', '[r', 'r]', '[d', 'd]']
[x] while display-off display-on should be moved out!
[x] a new word to include python file directly -- pyclude
supports commands after #__peforth__ comment by simply removing
all #__peforth__
Also comment out "from __future__ import print_function" lines
1. read the file
2. find all #__peforth__ replace with null
3. find "from __future__ import print_function" comment it out.
4. -indent indent
5. add <py> and </py>
6. tib.insert the string
: pyclude ( <pathname.py> -- ... ) // Run the .py file in a <PY>..</PY> space
CR word readTextFile py> re.sub("#__peforth__","",pop())
py> re.sub(r"(from\s+__future__\s+import\s+print_function)",r"#\1",pop())
-indent indent <py> " <p" + "y>\n" + pop() + "\n </p" + "y>\n" </pyV>
tib.insert ;
/// Auto-remove all #__peforth__ marks so we can add debug
/// statements what are only visible when debugging.
/// Auto comment out "from __future__ import print_function"
/// that is not allowed when in a <PY>..</PY> space.
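    The same text transformation as a plain-Python sketch (the file name is just an example), showing the regex steps the pyclude word performs before handing the string to the TIB:
        import re

        def pyclude_text(pathname):
            # 1. read the .py file
            with open(pathname, 'r', encoding='utf-8') as f:
                s = f.read()
            # 2. drop every #__peforth__ mark so debug-only lines become active
            s = re.sub(r"#__peforth__", "", s)
            # 3. comment out "from __future__ import print_function"
            s = re.sub(r"(from\s+__future__\s+import\s+print_function)", r"#\1", s)
            # 4./5. wrap the code for the TIB (in forth the tag is split to hide it from the parser)
            return " <py>\n" + s + "\n </py>\n"

        # print(pyclude_text("example.py"))   # then tib.insert / dictate the resulting string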
[x] tib.insert is dictate now, an alias.
[x] Tests before a Release v1.08
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. python -i -m peforth [/] selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] Run setup.bat 做出取消 selftest 的 wheel <-- 注意!改的是 site-packages\peforth
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. python -i -m peforth [/] selftest .s words exit bye
[x] 2. python -i -m peforth .' Hello World!!' cr bye
[x] 3. python import peforth
[x] 考慮 README.rst 改良
[x] version.txt advanced to v1.09
[x] the way I get the path is not good, data files are in a separate folder
in ubuntu. I have to manually copy data files to lib/python3.5
Copy : none .py files are in ~/.local/lib/site-packages/peforth
peforth.f peforth.selftest quit.f version.txt
To : .py files are in ~/.local/lib/python3.5/site-packages/peforth
__init__.py __main__.py projectk.py
Solutions I found on Stackoverflow are bad, do it manually is fine.
[x] A wiki page discusses this. done.
[/] 有機會解掉了。Search my Ynote: "2018/01/17 16:39 插曲,意外發現查出 python
的東西都放哪裡的方法了!peforth 在 Ubuntu 上跑可能有救了。_peforth_
_ubuntu_"
[/] Study this :
c:\Users\hcche\Documents\GitHub\DeepSpeech>py -m site
sys.path = [
'c:\\Users\\hcche\\Documents\\GitHub\\DeepSpeech',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\python36.zip',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\DLLs',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32\\lib',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\Pythonwin',
]
USER_BASE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python' (doesn't exist)
USER_SITE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages' (doesn't exist)
ENABLE_USER_SITE: True
c:\Users\hcche\Documents\GitHub\DeepSpeech>
[/] From the experience of developing harry_port() in
    c:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tf17_dropout\full_code-2.py :
    with such a powerful tool available, the words temporarily defined with it should not be wiped out
    by --- marker when the breakpoint ends. What to do?
    1. Put the things to keep in front of the tutorial, or include another tool kit first
       --> This one is good!
    2. If marker is not used (because my marker is too strong -- it clears across vocabularies!)
       then a forget is needed that clears only the current vocabulary's words back to ───.
    3. And vocabulary support is needed: define the words to keep into root, and normally work in the
       tutorial vocabulary.
[x] What is wrong with this interpreter for loop?
OK 3 [for] t@ 100 + int digit [next]
Failed in </py> (compiling=False): pop from empty list
Body:
push(pop().data[pop()])
OK
==> The problem is probably that digit uses the <text>...</text> dictate macro form
    Confirmed: avoiding that macro fixes it
[/] The usage trick of harry_port() should be written into its help! As in this example, a <py>...</py>
    block cannot be used because it gets compiled first, and this application must not be compiled first:
OK <text> locals().update(harry_port());
batch_X, batch_Y = mnist.train.next_batch(100); outport(locals()) </text>
py: exec(pop())
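    A plain-Python sketch of why that works (harry_port() itself is peforth's helper; a stand-in dict is used here): exec() run against a single dict both reads the injected names and leaves newly created ones in it, which is what outport(locals()) then publishes back.
        namespace = {'mnist_images': [1, 2, 3]}          # pretend these came from the caller's locals()

        src = "batch = mnist_images[:2]\ntotal = len(mnist_images)\n"
        exec(src, namespace)        # only globals given -> locals defaults to it
        print(namespace['batch'], namespace['total'])    # [1, 2] 3 -- new names stay in the dict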
[x] exit is not strong enough; execution keeps running past it. A stop has to be added.
code stop reset() end-code // ( -- ) Stop the TIB loop
code exit
if compiling: comma(EXIT)
else: vm.exit=True ; reset() <---- the added reset() is exactly what stop does
end-code immediate
// ( -- ) Exit this colon word.
Damn! Stumbled on this bug by accident!! I had long noticed the runaway after exit but never paid attention.
[x] <accept> nop </accept> on the same line doesn't work; should it be improved? ---> Done!
[x] Tests before a Release v1.09
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [/] selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] 注意!改的是 site-packages\peforth\quit.f 所以要
在 setup.bat 做 wheel 以前插入這個動作!!!!!
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] selftest .s words exit bye
[x] 2. python -i -m peforth .' Hello World!!' cr bye
[x] 3. python import peforth
[x] 考慮 README.rst 改良
[x] version 改成 1.11 (必須跳過 1.10 會變成 1.1)
[/] -indent could be smarter, the goal being more freedom inside <text>...</text>.
    When the line holding </text> is a blank line, use its length as the minimum for -indent; that way
    something can follow right after <text>. Any line with fewer leading spaces than the </text> line
    gets "padded" up to that minimum. More freedom that way.
[x] Besides exit and stop, a way to abort an include is also needed. Either carefully define the
    difference between stop and exit, or merge them. vm.exit is for ok(); it obviously exists to return
    to the python interpreter, and that is already a bit of a headache because exit is at the same time
    an instruction for the inner loop, equivalent to RET. If exit took on yet another meaning, even I
    would get confused. So only stop is left, and stop clearly breaks the outer loop. Therefore a new
    word is needed ... break-include. Since sinclude uses dictate to process the .f file, tweaking ntib
    may already give the break-include effect. Try it: set a breakpoint inside xray.f and check whether
    the half-way tib still contains the tutorial...
    ---> Bingo!!
    : break-include ( -- ) // Break including .f file
        py: vm.ntib=len(tib) ;
    stop is just reset()
    exit while compiling is EXIT==RET; otherwise it is merely vm.exit=True, which stops ok().
    2020/06/03 10:34:10 later wrote skip2 for proeforth, which is more flexible.
[x] peforth can be used to help a .py import modules
py> os.getcwd() constant working-directory // ( -- "path" ) Tutorial home directory saved copy
\ my MNIST_data directory is there
cd c:\Users\hcche\Downloads
py:~ from tensorflow.examples.tutorials.mnist import input_data as mnist_data; push(mnist_data)
parent :: ['mnist_data']=pop(1) \ pop(1) is a headache; the in-line form has to be translated back into python in one's head to be readable.
[x] Rewrite *debug*; stop using pdb.set_trace()
    A way to use pdb without importing it:
py: sys.modules['pdb'].set_trace()
: *debug* ( <prompt> -- ... ) // FORTH breakpoint
BL word ( prompt ) py: ok(pop(),cmd="cr") ;
/// How to invoke pdb:
/// py: sys.modules['pdb'].set_trace()
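    In plain Python the same trick looks like this (a sketch; it assumes pdb was already imported somewhere, otherwise it falls back to a normal import):
        import sys
        dbg = sys.modules.get('pdb') or __import__('pdb')   # reuse an already-loaded pdb if present
        # dbg.set_trace()    # uncomment to drop into the debugger at this point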
[x] now 11 *debug* >> 22 <== but 22 got skipped ! <----- problem
--> fixed
[x] *debug* cannot be used in compiling mode (colon definitions) yet
    because the prompt that follows needs to read the tib immediately
[x] Bug found,
OK help a
Word in phaseB <Word 'help'>: 'int' object has no attribute 'help'
help improved
[x] new word "import" works fine
[x] new word __main__ works fine
s" dos title " __main__ :> __file__ + CRLF + dictate drop
Note! Without the CRLF, dos grabs everything after dictate, treating even drop as part of the command line
[x] release 1.11
new words import, __main__, break_include, and improved *debug* and help
[X] ( ... ) comment nested v1.23
[x] CRLF leaves '\r\n' on TOS
[x] Ignore command line when running in jupyter notebook
(Pdb) vm.commandline
'-f C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json'
(Pdb) sys.argv
['c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py', '-f', 'C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json']
(Pdb) sys.argv[0]
'c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py'
(Pdb) sys.argv[0].endswith('ipykernel_launcher.py') --> True , the key to know about the case
Running jupyter notebook hit the Error! -f unknown. problem again. Earlier this was because
import peforth under jupyter notebook gets an unexpected command line, as shown above.
Wasn't that already fixed? --> Excluding only py> sys.argv[0].endswith('.py') is not enough
py> sys.argv[0].endswith(('.py','.ipy','.ipynb'))
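    A compact plain-Python sketch of the two checks mentioned above (the function names are made up):
        import sys

        def launched_by_ipykernel():
            # Under jupyter notebook sys.argv[0] is .../ipykernel_launcher.py and the command
            # line only carries -f kernel-xxxx.json, so it must not be read as peforth commands.
            return bool(sys.argv) and sys.argv[0].endswith('ipykernel_launcher.py')

        def looks_like_script():
            return bool(sys.argv) and sys.argv[0].endswith(('.py', '.ipy', '.ipynb'))

        print(launched_by_ipykernel(), looks_like_script())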
[x] While itchat is running, this problem happens frequently:
Traceback (most recent call last):
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
f = float(token) # triggers exception if token is malformed
ValueError: could not convert string to float: '<mmreader><category'
Why doesn't the try/except catch it?
Reproducing steps (at home on my desktop) :
c:\Users\hcche\Documents\GitHub\ibrainfuck\bfinterpreter>python v1.12 at home
>>> import peforth
>>> peforth.ok()
OK sys . cr
Traceback (most recent call last):
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
f = float(token) # triggers exception if token is malformed
ValueError: could not convert string to float: 'sys'
Finally found a way to reproduce it....
--> Fixed after rewriting projectk.py > outer().
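    A stripped-down sketch of the pattern involved in projectk.py's outer loop (names here are illustrative, not the real internals): the int()/float() probes have to be wrapped so a malformed token falls through to the unknown path instead of raising.
        def interpret_token(token):
            # (the real outer loop tries the dictionary first; that part is omitted here)
            try:
                return int(token, 0)           # handles 10, 0x10, 0o17, ...
            except ValueError:
                pass
            try:
                return float(token)
            except ValueError:
                return "unknown: " + token     # hand over to the unknown handler, don't crash

        print(interpret_token("3.14"), interpret_token("<mmreader><category"))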
[x] study how to run brainfuck interpreter
c:\Users\hcche\Documents\GitHub\ibrainfuck
--> See Ynote __brainfuck_
[x] A bug led to finding a better way to use harry_port() (quit.f updated)
    \ The three study essentials
\ 1. DOS Box title
import peforth; peforth.ok(loc=locals(),cmd="include xray.f")
\ 2. Breakpoint
peforth.ok('11> ',cmd="parent inport")
\ 3. Lab of copy-paste
<accept> <text>
# ---------------------------------------------------------------------------
all locals() can use
# ---------------------------------------------------------------------------
</text> -indent py: exec(pop(),harry_port()) # If only globals is given, locals defaults to it.
</accept> dictate
[x] msg is a forth value and also a peforth global.
blabla bla something wrong.
--> It is not the confusion caused by inheriting the JavaScript mindset where object and dict are not
    distinguished. (::) and (:>) are both neutral; obj :: method or obj :: ['property'] depends on one's
    own view of the object, and the syntax itself is fine.
[x] Ipeforth kernel for Jupyter is ok now. Bring peforth to
http://nbviewer.jupyter.org/
How to install Ipeforth kernel for jupyter notebook :
Copy kernel.json to here:
%USERPROFILE%\AppData\Roaming\jupyter\kernels\peforth\kernel.json
c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json
manually create the directories if
%USERPROFILE%\AppData\Roaming\jupyter\kernels\
does not exist.
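    A sketch of the same manual steps in Python (the source path is an example and may differ on other installs; `jupyter kernelspec install <dir> --user` should achieve the same thing, but the copy below mirrors the instructions as written):
        import os, shutil, site

        # source: kernel.json shipped inside the installed peforth package (example location)
        src = os.path.join(site.getusersitepackages(), 'peforth', 'kernel.json')
        # destination: per-user jupyter kernels directory on Windows (example location)
        dst_dir = os.path.expandvars(r'%APPDATA%\jupyter\kernels\peforth')

        os.makedirs(dst_dir, exist_ok=True)              # create kernels\peforth\ if missing
        shutil.copy(src, os.path.join(dst_dir, 'kernel.json'))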
[x] Tests before a Release v1.13
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [/] no-selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [/] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] version 改成 1.14 (必須跳過 1.10 會變成 1.1)
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] 讓 jupyter feature peforth --> 已經加進 jupyter 的 kernel list:
https://github.com/jupyter/jupyter/wiki/Jupyter-kernels
[ ] Like harry_port that brings all wanted variables to projectk
How to make it easier?
[ ] Study when deep in a certain module, how peforth find and bring in
specified variables?
1. debug the toy.. keras exercise, breakpoint deep in a keras module
2. instead of using the trick of loc={**locals(),**{'foo':foo,'bar':bar}}
try to find foo,bar actual parent
3. access volatile variables out of their scope may not be a good idea
but being able to access them at a peforth breakpoint is necessary.
tensor_shape is imported in C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\tensorflow\python\keras\_impl\keras\layers\wrappers.py
char input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char tf <text> \ global variable
__main__ :> {0} constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
* 1. char foobar module ( module )
2. py: setattr(sys.modules['foobar'].projectk,'foobar',v('foobar')) \ add to peforth
* 1. import numpy constant np // ( -- numpy ) module object, method #1
py> sys.modules['numpy'] constant np // ( -- numpy ) method #2
__main__ :> np constant np // ( -- numpy ) method #3
2. np __main__ :: peforth.projectk.np=pop(1) \ peforth global
np __main__ :: np=pop(1) \ __main__ global, see 'help __main__'
* 3. py: setattr(sys.modules['peforth'].projectk,'np',v('np')) \ alt method
char child_input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
\ make librosa a global in peforth
char librosa py> tick(tos()) execute py: globals()[pop()]=pop()
\ even simpler way
import librosa constant librosa char librosa librosa py: globals()[pop()]=pop()
char input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char tensor_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char selfLayer <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
import peforth # [ ] _debug_
peforth.ok(cmd='''
0 value Count
none value child_output_shape
exit
''')
try:
child_output_shape = child_output_shape.as_list()
except Exception as err:
peforth.ok('33> ',loc={**locals(),**{'tensor_shape':tensor_shape,'self.layer':self.layer,'err':err}})
locals :: pop('peforth') locals inport
tensor_shape :> TensorShape(v('input_shape')).as_list() constant input_shape2
tensor_shape :> TensorShape([v('input_shape2')[0]]+v('input_shape2')[2:])
constant child_input_shape
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
[x] Cannot exit inside jupyter notebook; every exit leaves one extra thing on the stack and never gets out.
load> exit
load> .s
0: <IPython.core.autocall.ZMQExitAutocall object at 0x0000020577BF5EF0> (<class 'IPython.core.autocall.ZMQExitAutocall'>)
load>
--> Compare with a plain .py --> no such problem.
--> Go straight in and straight out --> stuck immediately.
--> Simplify the peforth cell and compare ... after locals inport there is one extra exit.
    It still looks like the original exit, but the extra one is wrong, and after --- marker cleans up it's fine!
    That fully proves it is the culprit.
--> How did it happen? --> In the ipython case, locals() at that moment contains a pile of things like exit, quit, etc.
    and exit happens to collide; locals :> ['exit'] . cr --> <IPython.core.autocall.ZMQExitAutocall object at 0x000001DBB24B5EF0>
    is exactly that weird thing.
    RI
[ ] It would be best if inport could pick selectively. The procedure:
    load2> locals keys . cr
    dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__builtin__', '__builtins__', '_ih', '_oh', '_dh', 'In', 'Out', 'get_ipython', 'exit', 'quit', '_', '__', '___', '_i', '_ii', '_iii', '_i1', 'tf', '_i2', 'tflearn', '_i3', 'speech_data', '_i4', 'time', 'peforth', 'epoch_count', 'learning_rate', 'training_iters', 'batch_size', 'width', 'height', 'classes', '_i5', 'batch', 'word_batch', '_i6', 'net', 'model', 'x', '_i7'])
    \ pick the wanted items from the list above
    <py> ['get_ipython', 'tflearn', 'speech_data', 'time', 'epoch_count',
    'learning_rate', 'training_iters', 'batch_size', 'width', 'height',
    'classes', 'batch', 'word_batch', 'net', 'model', 'x']
    </pyV> ( [picked keys] )
    \ pick exactly these out of locals
    <py> dict([(k,v) for k,v in v('locals').items() if k in tos()])
    </pyV> nip ( {picked locals} )
    \ now it is safe to inport them as peforth words
inport
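    The same picking step as a stand-alone plain-Python sketch (the names are just the example above):
        wanted = ['tflearn', 'speech_data', 'time', 'batch_size', 'width', 'height']
        snapshot = {'tflearn': 1, 'exit': 2, 'quit': 3, 'batch_size': 100}   # stand-in for locals()
        picked = {k: v for k, v in snapshot.items() if k in wanted}
        print(picked)        # only the chosen names survive; 'exit' / 'quit' are dropped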
[ ] python virtualenv http://docs.python-guide.org/en/latest/dev/virtualenvs/
The problem it solves is also FORTH's problem; look at how they solved it, think about how to adopt it, and how to include only what is necessary.
[x] The Ubuntu problem seems to have a solution,
    --> Under Ubuntu
        OK site :> USER_BASE . cr \ does not exist!
        /home/hcchen5600/.local
        OK site :> USER_SITE . cr \ does not exist!
        /home/hcchen5600/.local/lib/python3.6/site-packages
        OK site :> PREFIXES . cr
        ['/usr', '/usr']
        the actual files are under
        site.PREFIXES[0] + /local/lib/site-packages/peforth/
    --> windows
        OK site :> USER_BASE . cr \ does not exist!
        C:\Users\hcche\AppData\Roaming\Python
        OK site :> USER_SITE . cr \ does not exist!
        C:\Users\hcche\AppData\Roaming\Python\Python36\site-packages
        OK site :> PREFIXES . cr
        ['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36']
        the actual files are under
        site.PREFIXES[0] + /lib/site-packages/peforth/
--> Ubuntu virtualenv
>>> import site
>>> site.PREFIXES
['/home/hcchen5600/GitHub/DeepSpeech', '/home/hcchen5600/GitHub/DeepSpeech']
>>> site.USER_BASE
'/home/hcchen5600/.local'
>>> site.USER_SITE
'/home/hcchen5600/.local/lib/python3.6/site-packages'
the actual files are under
site.PREFIXES[0] + /lib/site-packages/peforth/
that is
\rootfs\home\hcchen5600\GitHub\DeepSpeech\lib\site-packages\peforth\..
\ On Windows the path can be normalized
Applying the above, on Windows it becomes
OK py> path . cr
C:\Users\hcche\AppData\Local\Programs\Python\Python36/lib/site-packages/peforth/
\ 這可以用 ntpath.normpath() 解決
OK import ntpath
OK constant ntpath
OK ntpath dir . cr
['__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', '_get_bothseps', '_getfinalpathname', '_getfullpathname', '_getvolumepathname', 'abspath', 'altsep', 'basename', 'commonpath', 'commonprefix', 'curdir', 'defpath', 'devnull', 'dirname', 'exists', 'expanduser', 'expandvars', 'extsep', 'genericpath', 'getatime', 'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile', 'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath', 'os', 'pardir', 'pathsep', 'realpath', 'relpath', 'samefile', 'sameopenfile', 'samestat', 'sep', 'split', 'splitdrive', 'splitext', 'splitunc', 'stat', 'supports_unicode_filenames', 'sys']
OK ntpath :> normpath . cr
<function normpath at 0x000001C511337E18>
OK ntpath :> normpath py: help(pop())
Help on function normpath in module ntpath:
normpath(path)
Normalize path, eliminating double slashes, etc.
OK py> path ntpath :> normpath(pop()) . cr
C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth
OK
\ Or check whether this is Windows
In [8]: sys.modules.get('nt') <--- None means it is absent, i.e. not Windows
In [9]: sys.modules.get('sys')
Out[9]: <module 'sys' (built-in)>
In [10]:
\ 更好的方法, yeah! this is it.
-- ubuntu --
In [12]: os.name
Out[12]: 'posix'
-- windows --
OK os :> name . cr
nt
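    In plain Python the two observations combine like this (the sample path is made up):
        import os
        raw = 'C:\\Users\\somebody\\AppData\\Local/lib/site-packages/peforth/'   # example mixed path
        print(os.name)                  # 'nt' on Windows, 'posix' on Linux -- the platform check above
        print(os.path.normpath(raw))    # os.path is ntpath on Windows, so separators get normalized there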
[/] With this solution even the installation of the jupyter peforth kernel can be automated.
[x] The Ubuntu problem should be solved now; to promote peforth a release has to come quickly
Tests before a Release v1.14
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 5. repeat 以上 in ubuntu
--> copy the wheel to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] version 改成 1.15 (必須跳過 1.10 會變成 1.1)
[x] 直接用測過的 wheel update Pypi
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] WSL Ubuntu virtualenv weird experience
when pip install peforth in a virtualenv --> permission denied
--> so I use sudo and this will success but peforth will be installed
to global instead of the virtualenv! see https://stackoverflow.com/questions/14665330/pip-requirement-already-satisfied
--> The reason why permission denied was peforth-1.14-py3-none-any.whl which
was copied by windows and it needs chmod 777
\ see the correct example below:
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ chmod 777 peforth-1.14-py3-none-any.whl
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ pip install peforth-1.14-py3-none-any.whl
Processing ./peforth-1.14-py3-none-any.whl
Installing collected packages: peforth
Successfully installed peforth-1.14
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$
[x] peforth.things alias for peforth.vm.things
    14:59 2018/03/11 made vm.execute(), vm.dictate(), peforth.ok() all return vm to support function cascading
    19:22 2018/03/11 besides the above, stack, push, words, ... etc. were added as well.
[x] The %f magic command has no auto-load for now; import peforth is needed first --> solved, though this way is fine too.
"c:\Users\hcche\OneDrive\文件\Jupyter Notebooks\Creating an IPython extension with custom magic commands.ipynb"
The discussion is copied over as follows:
[x] As described above, after adding c.InteractiveShellApp.extensions = ["c:\\Users\\hcche\\Downloads\\csvmagic.py"] it had no effect. From [stackoverflow](https://stackoverflow.com/questions/27483637/auto-reload-extension-not-reloading-on-change) I learned to try '%load_ext c:\\Users\\hcche\\Downloads\\csvmagic.py' in jupyter notebook or ipython . . . and indeed it was a problem with how the path was written. Following that example, when csvmagic.py sits in the current directory a plain '%load_ext csvmagic' is enough. If it is not in the current directory it has to be importable, and manually putting it into site-packages also works, as discussed below.
[x] Or must it be a module reachable by -m? Right! That is exactly what importable means above. --> Manually putting it into site-packages (renaming the file to __init__.py) makes it importable; try it --> success! But %%csv only exists after running '%load_ext csvmagic'; it does not auto-load.
[x] Moreover, import csvmagic has no effect either; yet after the correct arrangement below, import peforth works -- no idea why.
[x] Auto-loading should work like the way peforth is installed, which means the job done by csvmagic.py has to be done by `GitHub\peforth\peforthkernel.py` (wrong! it must be handled by peforth's `__init__.py`). peforth's %f plays the double role of line magic and cell magic -- how should that be written? See here: http://ipython.readthedocs.io/en/stable/config/custommagics.html
# from IPython.core.magic import (register_line_magic, register_cell_magic)
# @register_line_magic
# def f(line):
# peforth.vm.dictate(line)
#
# @register_cell_magic
# def f(line, cell):
# peforth.vm.dictate(cell)
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def f(line, cell=None):
if cell is None:
peforth.vm.dictate(line)
else:
peforth.vm.dictate(cell)
def load_ipython_extension(ipython):
ipython.register_magic_function(f, 'line_cell')
# see http://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html?highlight=register_magic_function
[x] (wrong!) Put it into GitHub\peforth\peforthkernel.py
[x] (wrong!) Copy it to the location pointed to by c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json: "c:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\peforth\\peforthkernel.py"
[x] Restart jupyter notebook --> no effect, which means the code above never got executed. Maybe putting it in GitHub\peforth\peforthkernel.py is wrong (definitely wrong), or some other .json has to point to the right place. Read the docs! --> Got it! It is this line, c.InteractiveShellApp.extensions = ['peforth'], so the code above has to go into peforth's __init__.py (correct) --> try again ... still no effect, import peforth is still required. Good enough for now.
[x] My guess is that the profile_default\ipython_config.py holding c.InteractiveShellApp.extensions = ['csvmagic','peforth']
    is entirely ineffective. Another setting from "28 Jupyter Notebook tips, tricks and shortcuts" tried earlier in
    that file also had no effect. Judging from the /test/ in its path, it is probably not the right file.
    --> %f get_ipython :> ().ipython_dir . cr shows the correct location is `C:\Users\hcche\.ipython`, i.e.
    `C:\Users\hcche\.ipython\profile_default\ipython_config.py` --> try it and see whether load_ext happens
    automatically . . . it does! Right after fixing `profile_default\ipython_config.py`
    it took effect for newly opened jupyter notebooks.
[x] The ipython magic initialization in __init__.py needs to be foolproof so that nothing breaks when running
    from plain python (not ipython). How to detect ipython depends on where the check runs; inside peforth's
    __init__.py seems to be too early, so both of these checks are always false and useless -- no automatic load_ext:
    if 'get_ipython' in globals():
    if '__IPYTHON__' in dir(__builtins__):
    Let it be; requiring import peforth first has its advantages, e.g. the greeting shows up at import time.
[x] Debugging peforth's __init__.py from inside a jupyter notebook is very convenient! Set a breakpoint with pdb.set_trace()
    right before the ipython check and inspect the two expressions above --> at that moment both are false!! But I found
    this one works:
    '__IPYTHON__' in __builtins__.keys()
    B i n g o ! ! It worked: it turns out the definition of __builtins__ changes later on, and
    __builtin__ does not even exist at that point.
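    A plain-Python sketch of that check (hedged: it relies on IPython publishing __IPYTHON__ into builtins, which is what the experiment above observed):
        import builtins
        # __builtins__ is the builtins module in __main__ but a plain dict inside imported modules,
        # which is why the two earlier tests behaved differently depending on where they ran.
        in_ipython = hasattr(builtins, '__IPYTHON__') or \
                     (isinstance(__builtins__, dict) and '__IPYTHON__' in __builtins__)
        print(in_ipython)               # False under plain python, True under ipython / jupyter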
[x] Tests before a Release v1.15
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> check errorlevel
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
--> pip3 install (/mnt/...the wheel) to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 直接用測過的 wheel update Pypi
[x] version 改成 1.16 (必須跳過 1.10 會變成 1.1)
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] README.md needs to improve the installation guide for jupyter notebook support
Install peforth kernel for Jupyter Notebook
If you have ipython and jupyter installed, do following steps to add peforth
as a kernel of Jupyter Notebook,
Install peforth kernel for Jupyter Notebook
1. install peforth
pip install peforth
2. copy
c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\kernel.json
to
c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json
If the target directories kernels\ or peforth\ above do not exist, create them manually
3. Edit the file just mentioned
c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json
and correct the following path in it according to your own machine
c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\peforthkernel.py
The above is the example from my machine
[/] Hopefully this installation can be automated
    refer to Ynote : "怎麼加 javascript kernel 進 jupyter notebook" _ijavascript_
[x] After setup.bat successfully updated Pypi, there was an error like 'batch not found'.
    Found while uploading v1.15. Probably because bye was commented out, so execution ran on into the batch part of the file.
[/] In v1.15 %f also hit the problem that a comment without trailing whitespace gets seen by the next line
    %f __main__ :> census_train['age'].head(2) . cr \ strange, how does it know this dtype is int64?
    13:34 18/05/22 cannot reproduce; I have even forgotten where the line above came from.
[x] Unrecognized words automatically get looked up in __main__ <-- done! v1.16
    Unrecognized words automatically get looked up in locals
    Unrecognized words automatically get looked up in globals
    It seems project-k, or some way of plugging in a sequence of methods, could handle unknown words
    --> execute a word named unknown ( 'token' -- thing Y|n)
        returning True means it was handled; returning False means it was not handled (default),
        in which case the unknown message is shown.
    --> Doing __main__ first is the simplest
: unknown py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=='Ûnknôwn' if drop false else true then ;
// ( token -- thing Y|N) Try to find the unknown in __main__
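    What that definition does, as a plain-Python sketch (the sentinel string is the same trick; function and variable names are illustrative):
        import sys
        _SENTINEL = "Ûnknôwn"              # an unlikely value used to mean "not found"

        def unknown(token):
            thing = getattr(sys.modules['__main__'], token, _SENTINEL)
            if isinstance(thing, str) and thing == _SENTINEL:
                return None, False          # not handled -> caller prints the error message
            return thing, True              # handled -> push the object found in __main__

        print(unknown('no_such_name'))      # (None, False)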
[x] After starting to support jupyter magics a problem appeared: running ipython -m peforth directly gives the error below.
    Entering ipython first and then import peforth is fine.
c:\Users\hcche\Documents\GitHub>ipython -i -m peforth
Python 3.6.0 (v3.6.0:41df79263a11, Dec 23 2016, 08:06:12) [MSC v.1900 64 bit (AMD64)]
Type 'copyright', 'credits' or 'license' for more information
IPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.
p e f o r t h v1.16
source code http://github.com/hcchengithub/peforth
Type 'peforth.ok()' to enter forth interpreter, 'exit' to come back.
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in run_module(mod_name, init_globals, run_name, alter_sys)
199 Returns the resulting top level namespace dictionary
200 """
--> 201 mod_name, mod_spec, code = _get_module_details(mod_name)
202 if run_name is None:
203 run_name = mod_name
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
140 try:
141 pkg_main_name = mod_name + ".__main__"
--> 142 return _get_module_details(pkg_main_name, error)
143 except error as e:
144 if mod_name not in sys.modules:
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
107 # Try importing the parent to avoid catching initialization errors
108 try:
--> 109 __import__(pkg_name)
110 except ImportError as e:
111 # If the parent or higher ancestor package is missing, let the
c:\Users\hcche\Documents\GitHub\peforth\__init__.py in <module>()
166 # Define peforth magic command, %f.
167 @register_line_cell_magic
--> 168 def f(line, cell=None):
169 if cell is None:
170 vm.dictate(line)
c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\magic.py in magic_deco(arg)
227 break
228 else:
--> 229 raise NameError('Decorator can only run in context where '
230 '`get_ipython` exists')
231
NameError: Decorator can only run in context where `get_ipython` exists
c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py:2598: UserWarning: Unknown failure executing module: <peforth>
warn('Unknown failure executing module: <%s>' % mod_name)
[x] ipython -m peforth goes wrong, probably because get_ipython is not ready at that moment <-- correct
    NameError: Decorator can only run in context where `get_ipython` exists
    c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py:2598: UserWarning: Unknown failure executing module: <peforth>
    warn('Unknown failure executing module: <%s>' % mod_name)
    Once inside the ipython command prompt or jupyter notebook there is no problem
    In [2]: 'get_ipython' in globals()
    Out[2]: True
    --> Just check in the right way whether the ipython magic machinery exists; the error message above gives the clue.
        Checking whether a python name is defined has to use try-except:
        try:
            flag = "InteractiveShell" in str(get_ipython)
        except:
            flag = False
        if flag:
            from IPython.core.magic import register_line_cell_magic
            ... snip ....
    Note: after solving the problem, the situation now is:
    1. jupyter notebook works with no problem at all.
    2. When started with ipython -i -m peforth, after exit ipython does not recognize the magic commands:
        In [1]: %f
        UsageError: Line magic function `%f` not found.
    3. Only entering ipython first and then import peforth gets the magic commands recognized.
[x] Tests before releasing v1.16
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] 直接從 GitHub folder 執行 python peforth --> .s cd help exit
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[/] 2. ipython -i -m peforth .' Hello World!!' cr bye --> 目前有問題
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
import peforth
%f ." Hello FORTH!"
%%f Now we redefine the 'unknown' command that was doing nothing
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
\ here after, when FORTH comes across an unknown token, instead of an error
\ message, it tries to find the token in the python __main__ module name space.
y = 'abc'
%f y . cr
%f yy . cr
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 直接從 GitHub folder 執行 python peforth 先確定一把 --> .s cd help exit
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu
--> pip3 install (/mnt/...the wheel) to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 直接用測過的 wheel update Pypi
twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] version 改成 1.17 (必須跳過 1.10 會變成 1.1)
[x] test mybinder.org to view peforth > notebook > *.ipynb
No good; my guess was that it is still _the_path_issue_ <--- no, setup.py issue, see below.
[x] Make a master release up to GitHub --> easy with GitHub Windows.
[x] v1.16 released
[x] (create) in peforth.f assumes that when a command line is present the reDef warnings should be
    suppressed to keep the screen clean, since reDef is the normal case. But under jupyter notebook
    there is always a command line; under jupyter notebook:
    %f py> commandline.strip() tib. ==> -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-17e1c697-6363-49d3-b3af-81708a468835.json (<class 'str'>)
    so all the reDef warnings disappear, which is not right either. Since the command line is completely
    useless under jupyter notebook, the original check can stay, but the jupyter notebook case has to be excluded.
    The conclusion is --> ('jupyter' in str(sys.modules) or not commandline.strip())
[x] Tests before releasing v1.17
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] 直接從 GitHub folder 執行 python peforth --> .s cd help exit
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 直接從 GitHub folder 執行 python peforth 先確定一把 --> .s cd help exit
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] no selftest, peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[x] ipython -m peforth
[x] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
--> Error! GFW?
    HTTPError: 403 Client Error: Invalid or non-existent authentication information. for url: https://upload.pypi.org/legacy/
--> Retry and see ... this time it worked!
    c:\Users\hcche\Desktop\peforth-master>twine upload dist/*
    Uploading distributions to https://upload.pypi.org/legacy/
    Enter your username: hcchen5600
    Enter your password:
    Uploading peforth-1.17-py3-none-any.whl
    12%|...snip....
    c:\Users\hcche\Desktop\peforth-master>
--> Strange: the pypi.org page already shows the upgrade to 1.17, and on WSL Ubuntu
    pip uninstall peforth -> pip install peforth also gets 1.17, but under
    Windows DOS it stays at 1.16 no matter what! Leave it, check later --> indeed fine after a few minutes!!
[x] version 改成 1.18 (必須跳過 1.10 會變成 1.1)
[x] test mybinder.org
[http://github.com/hcchengithub/peforth][master][notebook]
No good; it looks like a setup.py problem --> see Ynote: "mybinder.org FileNotFoundErErrorno 2 No such file or directory"
--> RI: not a bug; just rename setup.py so that mybinder.org doesn't see it.
    2018.12.15 this is probably why it is named setup.py.whl; I am studying the command line:
    python setup.py install
    which is perhaps the right answer for installing peforth from source.
[x] Make a master release up to GitHub --> easy with GitHub Windows.
[x] v1.17 released --> version.txt bumped to v1.18
[x] v1.14 v1.15 v1.16 on WSL Ubuntu, virtualenv , _the_path_issue_
ipython still failed, message:
...snip...
~/tmp/deepspeech-venv/lib/python3.6/site-packages/peforth/__init__.py in readTextFile(pathname)
33
34 def readTextFile(pathname):
---> 35 f = open(pathname,'r',encoding='utf-8')
36 # for line in f:
37 s = f.read()
FileNotFoundError: [Errno 2] No such file or directory:
'/usr/local/lib/site-packages/peforth/version.txt' <--- because the .py files and the data files are placed separately
...snip...
[x] https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory
[x] v1.17 still uses site.getsitepackages() plus a little brute force
deli = '\\' if os.name == 'nt' else '/'
path = "something wrong peforth path not found"
for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")):
dirs = p.split(deli)
if dirs[-2] != 'lib': # expecting 'lib'
dirs = dirs[:-2] + [dirs[-1]]; # if -2 is not 'lib' then remove it (pythonM.N or the likes)
if 'lib' in dirs: # extra check, may not be necessary
path = deli.join(dirs) + deli + "peforth" + deli
[x] test with WSL Ubuntu virtualenv --> failed
[x] v1.17 failed for WSL Ubuntu in both with and without virtualenv. <-- v1.21 FP
The problem:
When without virtualenv:
hcchen5600@WKS-4AEN0404:~$ python -m peforth
Traceback (most recent call last):
...snip...
File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 67, in <module>
exec(readTextFile(path + "version.txt"),{},locals())
File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 35, in readTextFile
f = open(pathname,'r',encoding='utf-8')
FileNotFoundError: [Errno 2] No such file or directory: 'something wrong peforth path not foundversion.txt'
When with virtualenv:
(playground) hcchen5600@WKS-4AEN0404:~$ python -m peforth
Traceback (most recent call last):
...snip...
File "/home/hcchen5600/playground/lib/python3.6/site-packages/peforth/__init__.py", line 57, in <module>
for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")):
AttributeError: module 'site' has no attribute 'getsitepackages'
The answer:
Still this article: https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory
[x] List the correct answers directly first
w/o virtualenv /home/hcchen5600/.local/lib/site-packages/peforth/version.txt
with virtualenv /home/hcchen5600/playground/lib/site-packages/peforth/version.txt
w/o virtualenv C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\version.txt
[x] Method 1: sys.path -- the ultimate answer
    Ubuntu with virtualenv: works (need to strip "python3.6")
>>> import sys
>>> [f for f in sys.path if f.endswith('site-packages')]
['/home/hcchen5600/playground/lib/python3.6/site-packages']
Ubuntu w/o virtualenv 可用(要剃除"python3.6")
>>> import sys
>>> [f for f in sys.path if f.endswith('site-packages')]
['/home/hcchen5600/.local/lib/python3.6/site-packages']
Windows w/o virtualenv 正確
>>> [f for f in sys.path if f.endswith('site-packages')]
['C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages']
--> With this method, just change "site.getsitepackages()" in v1.17's __init__.py
    to "sys.path" and that's it. Honestly!
[x] Method 2: site.getsitepackages() <--- failed in v1.16; the worst of the three, good grief!
python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
Windows w/o virtualenv 正確
python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages']
Ubuntu w/o virtualenv 錯誤!
hcchen5600@WKS-4AEN0404:~$ python
Python 3.6.5 (default, May 3 2018, 10:08:28)
[GCC 5.4.0 20160609] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import site
>>> site.getsitepackages()
['/usr/local/lib/python3.6/dist-packages', '/usr/lib/python3/dist-packages', '/usr/lib/python3.6/dist-packages']
Ubuntu with virtualenv 直接陣亡,根本不 support 這個命令!
(playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
Traceback (most recent call last):
File "<string>", line 1, in <module>
AttributeError: module 'site' has no attribute 'getsitepackages'
[x] Method 3: no good! <--- python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
Windows w/o virtualenv 正確
c:\Users\hcche\Downloads>python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages
Ubuntu w/o virtualenv 錯誤!
hcchen5600@WKS-4AEN0404:~$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
/usr/lib/python3/dist-packages <--- 錯了,不能用。
Ubuntu with virtualenv 可用(要剃除"python3.6")
(playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
/home/hcchen5600/playground/lib/python3.6/site-packages
[x] Accidentally found that when python -m peforth include 1.f runs, ok() and vm.ok() are not recognized inside 1.f
RI: the recent __init__.py "run once" section that runs quit.f that runs command line
arguments is *before* the definition of ok()! --> I move it down to the bottom
then problem is gone. This solution will be released with v1.18.
[x] mybinder.org fails to start, peforth/version.txt file not found <--- RI: just rename setup.py, expecting v1.18
    See my Ynote: "mybinder.org FileNotFoundErErrorno 2 No such file or directory"
    --> My guess is the file layout around setup.py: at Desktop\peforth-master\ there is an extra peforth
        folder, so seen from where the project's own setup.py sits, version.txt is not at
        peforth/version.txt but directly at version.txt. For v1.16, first directly edit
        Desktop\peforth-master\setup.py
        and build a wheel locally to see whether it works; if it does, that proves the grad student's extra
        peforth folder in the layout is unnecessary, and changing it gives a chance.
    --> It really worked: move everything inside peforth/ up one level, change setup.py, drop peforth/,
        and run from the working directory as below -- success!
c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist .
Processing c:\users\hcche\desktop\peforth-master
Building wheels for collected packages: peforth
Running setup.py bdist_wheel for peforth ... done
Stored in directory: c:\users\hcche\desktop\peforth-master\dist
Successfully built peforth
c:\Users\hcche\Desktop\peforth-master>
--> This means there is no need for Desktop\peforth-master at all; the local GitHub repo works
    directly --> wrong wrong wrong! The Desktop\peforth-master\peforth folder is required
--> peforth/version.txt file not found is probably still _the_path_issue_
[x] Make a 1.17 release to check whether mybinder.org is fixed --> failed !!
    see also : Ynote : "研究 peforth 的 path 到底正確該如何"
[x] setup.py was set up by the grad student to build peforth's whl. Now that mybinder.org also
    looks at it, it has to be done more carefully; I think that is the reason....
    --> Probably two setup.py's are needed: one at the peforth folder, the other is for building .whl.
        when building .whl, the setup.py is at parent folder and is that a must ?
    --> anywhere>pip wheel --wheel-dir=dist c:\Users\hcche\Desktop\peforth-master
        c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist peforth
        both of the above can build peforthxxxxx.whl
    --> The problem only appears because peforth/setup.py is seen by mybinder.org; try renaming it to setup.py.disabled...
        RI: just renaming setup.py is enough!!! Simply don't let mybinder.org see a setup.py.
    --> As said above, our setup.py is for building the .whl, meant for pip, not for mybinder.org.
[x] Merge to master, but no rush to release; it is purely a setup.py issue, unrelated to the code.
    To make mybinder.org run, just change setup.py on github to setup.py.whl.
    --> expecting v1.18
[x] Tests before a Release v1.18 <--- on pypi.org already
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[取消] 直接從 GitHub folder 執行 python peforth --> 等於是 -m peforth
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[x] 5. jupyter notebook --> peforth kernel
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[/] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[/] 3. python import peforth
[/] no selftest, peforth.ok() .s words <--- no parent
[/] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[x] ipython -m peforth
[x] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
這個跟 pypi.org 無關,只要 github 有 push 上去馬上生效。
[x] The pypi.org page already shows the upgrade to 1.18; on WSL Ubuntu
    pip uninstall peforth -> pip install peforth also picked it up, but under
    Windows DOS it stays at 1.16 no matter what! Leave it, check later --> indeed fine after a few minutes!!
    [x] WSL Ubuntu w/o virtualenv --> python -m peforth ... ok
    [x] WSL Ubuntu with virtualenv --> python -m peforth ... ok
[/] test colab --> v1.18 still failed, still the path problem :-(
[x] version changed to 1.19 (must skip 1.10, which would become 1.1)
[x] Make a master release up to GitHub --> easy with GitHub Windows.
[x] test colab --> v1.18 still failed, still the path problem :-(
    v1.18 failed on colab; the chances are that v1.16 works fine on colab.
[x] use v1.16 (pip install peforth==1.16 on colab) to check sys.path & site.getsitepackages()
---- from collab with peforth v1.16 ----
import site
site.getsitepackages()
['/usr/local/lib/python3.6/dist-packages',
'/usr/lib/python3/dist-packages',
'/usr/lib/python3.6/dist-packages']
import sys
sys.path
['',
'/env/python',
'/usr/lib/python36.zip',
'/usr/lib/python3.6',
'/usr/lib/python3.6/lib-dynload',
'/usr/local/lib/python3.6/dist-packages',
'/usr/lib/python3/dist-packages',
'/usr/local/lib/python3.6/dist-packages/IPython/extensions',
'/content/.ipython']
-------- actual peforth path on Google colab ---------------
!ls /usr/local/lib/python3.6/dist-packages/peforth
__init__.py __main__.py peforthkernel.py projectk.py __pycache__ setup.py
!ls /usr/local/lib/site-packages/peforth
kernel.json peforthkernel.py __pycache__ version.txt
peforth.f peforth.selftest quit.f
[/] So, the answer is clear here . . . try all possible directories with some
guess to find /peforth/version.txt that's doable
[x] can be setup.py's problem. I don't think all modules are facing the same
annoying problem. --> try to simplify setup.py.whl
--> RTFD : https://packaging.python.org/guides/distributing-packages-using-setuptools/?highlight=data_files#data-files
[x] testing c:\Users\hcche\Desktop\peforth-master\setup.py.improved that uses
    package_data={...} instead of data_files=[...] in setup.py
    --> rebuild the wheel with the modified setup.py
        Strangely, it has to be built with github\peforth\setup.bat, otherwise pip wheel doesn't build at all; anyway a workable way produced v1.19
        See Ynote: "Pack peforth to peforth.whl" > the discussion at "2018/07/02 13:06".
    --> Looking directly into the ~.whl (a zip file) shows it succeeded!
[x] v1.18's massaging of sys.path is wrong now --> remove it
[x] path="" only fails when setup.bat needs to read it; is that really unacceptable?
    --> Really unacceptable: when reading version.txt, os.getcwd() really is whatever the current working directory happens to be, so that doesn't work.
    --> So the sys.path method still has to be used --> Windows was never wrong in the first place!
[x] The benefit of changing setup.py is that the data files and the .py files stay together, but how to get
    the path is still a problem -- Ubuntu and colab can't both be satisfied --> searching sys.path for peforth/version.txt
    is still the only way ... not hard:
path = "something wrong peforth path not found"
for p in (pp for pp in sys.path if pp.endswith("site-packages")):
if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
path = p + deli + 'peforth' + deli
break
vm.path = path
pdb.set_trace() # *debug*
[x] windows (none anaconda virtualenv), WSL Ubuntu w/o virtualenv, with virtualenv
--> all pass!
[x] Tests before a Release v1.19 --> v1.21 actually
[x] 所有 run 法帶 selftest:
[x] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[x] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] no selftest, peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] pypi.org 網頁上已經 upgraded 到 1.19 版了,
若不行,晚點再看,過幾分鐘就好。
[x] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[x] WSL Ubuntu with and w/o virtualenv --> python -m peforth
[x] Windows DOS 下試
[x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
這個跟 pypi.org 無關,只要 github 有 push 上去馬上生效。
[x] test colab --> v1.19 --> shit, wrong again! Must not restrict it to site-packages; dist-packages has to be accepted too
    deli = '\\' if os.name == 'nt' else '/'
    path = "wrong"
    for p in sys.path:
        if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
            path = p + deli + 'peforth' + deli
            break
    With this change it works; release v1.21! Shit shit . . .
[x] __init__.py
[x] rebuild setup.bat
[x] release v1.21 to pypi.org
[x] test colab ... !pip install peforth==1.21 要等一等。。。 v1.21 成功了! 嗚嗚嗚
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] version 改成 1.22 (必須跳過 1.20 會變成 1.2)
[x] 14:48 2018-12-09 python object (attributes -> values) and hash table or
dictionary (keys --> values) are confusing me especially when JavaScript sees
both the samething. The python 'dir' function lists an object's attributes and
JSON can stringify a hash table to a readable string. Let's make an experient:
\ o1 is a dict
py> {'a':11,'b':22} constant o1
OK o1 tib. --> {'a': 11, 'b': 22} \ it's a dict so it's shown as a dict
OK o1 :> keys() . cr --> dict_keys(['a', 'b']) \ dict has keys
OK o1 :> values() . cr --> dict_values([11, 22]) \ dict has values
\ it's also an object
OK o1 dir . cr \ so it has attributes
--> ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values']
OK o1 stringify . cr
{
"a": 11,
"b": 22
}
OK
So the dict/object confusion is a JavaScript user's problem. Everything is an object, but only a dict has a hash table.
Use (see) dir .members to inspect an object's attributes, use (see) keys values to inspect a dict, and use stringify to view
anything after it has been dict'fied (see the Python sketch below).
--> Conclusion: just explain this clearly in help (see). obj>keys from jeforth duplicates dir and keys, so it is rarely used now.
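A minimal Python sketch of the same point (plain CPython; the names are only illustrative):
    d = {'a': 11, 'b': 22}                      # a dict: has a hash table (keys/values)
    print(d.keys(), d.values())                 # dict_keys(['a', 'b']) dict_values([11, 22])
    print('keys' in dir(d))                     # True: the dict is also an object with attributes
    class O: pass
    o = O(); o.a = 11                           # a plain object: attributes only, no key/value API
    print(getattr(o, 'a'))                      # 11
    import json; print(json.dumps(d, indent=2)) # "stringify" of the dict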
[x] Install peforth from source
---- 2018.12.15 Learned that "python setup.py install" needs some changes ----
[x] Ynote: "研究 install peforth from source 的方法" -- succeeded.
[x] Conclusion: the peforth/ directory layout should follow the grad student's arrangement, changing the original, rather unnatural way of running it:
    C:\Users\hcche\Documents\GitHub\>python peforth
    into running from inside the peforth directory, which is nicer! [X] In v1.22/1.23 __main__.py still just did 'import peforth', which is pointless --> test.py is now supported to replace __main__.py for developing and debugging
[x] The pywinio repo likewise has an inner pywinio/ folder; peforth will be laid out like that too.
[x] Restructure GitHub/peforth following that directory layout
c:\Users\hcche\Documents\GitHub\peforth\..
Directories Files
-------------------- ---------------------------
.git\ .gitattributes
.ipynb_checkpoints\ LICENCE
__pycache__\ admin.bat
notebook\ requirements.txt
peforth\ LICENSE
peforth.egg-info\ README.md
playground\ README.rst
setup.bat
setup.py
setup.py.whl
log.txt
.gitignore
c:\Users\hcche\Documents\GitHub\peforth\peforth\..
Directories Files
-------------------- ---------------------------
__main__.py
kernel.json
peforthkernel.py
projectk.py
peforth.selftest
version.txt
__init__.py
quit.f
peforth.f
[x] remove existing peforth so aso to try setup.py install
Python on my computer at home is Anaconda, so I thought I had to remove it
with the "conda uninstall" command. That was wrong. Removing it with pip as usual works fine.
see Ynote:"研究 install peforth from source 的方法" for the log.
[x] now try "python setup.py install"
    it works !!!!
    How to view setup.py's help: c:\Users\hcche\Documents\GitHub\peforth>python setup.py --help
[x] setup.bat can be greatly simplified now.
[Releases after V1.22] Packaging steps 2018/12/16 11:02
    See my Ynote: "Pack peforth to peforth.whl"
    1. Check ~\GitHub\peforth\setup.py for newly added files that are missing and files that should be dropped.
    2. Run c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
       to get peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
    3. Run c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
       It needs the account and password, see Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
    4. pip uninstall peforth and then pip install peforth to try it out.
    5. Done!
[x] 13:27 2019-03-06 code ... end-code 可以取 xt.__doc__ 2nd line 當作 help
code txt2json # ( txt -- dict ) Convert given string to dictionary
push(json.loads("".join([ c if c != "'" else '"' for c in pop()])))
end-code
' txt2json :> xt :> __doc__ --> def xt(_me=None): ### txt2json ###
# ( txt -- dict ) Convert given string to dictionary
push(json.loads("".join([ c if c != "'" else '"' for c in pop()]))) (<class 'str'>)
18:04 2019-05-09 寫好了:
# projectk.py 裡面
# The basic FORTH word 'end-code's run time.
def doendcode(_me=None):
global compiling
if compiling!="code":
panic("Error! 'end-code' a none code word.")
current_word_list().append(Word(newname,newxt))
last().vid = current;
last().wid = len(current_word_list())-1;
last().type = 'code';
# ---------
mm = re.match(r"^.*?#\s*(.*)$", last().xt.__doc__.split('\n')[1])
last().help = mm.groups()[0] if mm and mm.groups()[0] else ""
# ---------
wordhash[last().name] = last();
compiling = False;
--> py> doendcode .source <---- now shows the correct source code
[x] Tried defining a code word and checking its help; the first line's "# foo bar" is indeed captured as the help.
[X] Copy over the explanation of unknown, debug, and locals()
older unsync'ed notes on my LRV2
v1.22 既然 peforth 主要都是用來配合 jupyter notebook trace code, set breakpoints, ... etc.
unknown and ... and # should be added into the built-in words, plus the ability to
view local variables.
[x] I remember that I have done making 'unknown' predefined . . . no.
16:51 2019-01-12 I am now working on making 'unknown' to try locals. __main__ is
an object, so global variables are accessed with getattr(); locals and globals, however,
    are dictionaries that should be accessed with dict.get(key, default) instead (see the sketch below).
see https://stackoverflow.com/questions/3089186/python-getattr-equivalent-for-dictionaries
[x] done an example @
http://localhost:8888/notebooks/OneDrive/%E6%96%87%E4%BB%B6/Jupyter%20Notebooks/Siraj%20make_a_neural_net_live_demo.ipynb
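A minimal Python sketch of that lookup order (illustrative names only, not the actual peforth code):
    import sys
    def lookup(token, _locals_=None):
        if _locals_ is not None:                        # inside a function: try locals first
            thing = _locals_.get(token, "Unknown")      # dicts use .get(key, default)
            if thing != "Unknown":
                return thing
        # fall back to attributes of the __main__ module (an object)
        return getattr(sys.modules['__main__'], token, "Unknown")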
Source Code
===========
none value _locals_ // ( -- dict ) locals passed down from ok()
false value debug // ( -- flag ) enable/disable the ok() breakpoint
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__ or _locals_
_locals_ if \ in a function
( token ) _locals_ :> get(tos(),"Ûnknôwn") ( token, local )
py> str(tos())!="Ûnknôwn" ( token, local, unknown? )
if ( token, local ) nip true exit ( return local Y ) else drop ( token ) then
then
( token ) py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn") ( thing )
py> str(tos())=="Ûnknôwn" if ( thing ) drop false else true then ;
/// Example: Set a breakpoint in python code like this:
/// if peforth.execute('debug').pop() : peforth.push(locals()).ok("bp>",cmd='to _locals_')
/// Example: Save locals for investigations:
/// if peforth.execute('debug').pop() : peforth.push(locals()).dictate('to _locals_')
/// That enters peforth that knows variables in __main__ and locals at the breakpoint.
/// 'exit' to leave the breakpoint and forget locals.
: exit ( -- ) // ( -- ) Exit the breakpoint forget locals and continue the process
none to _locals_ py: vm.exit=True ;
code # print(nexttoken('\n')+'\n') end-code // print the comment line after #
: --> ( result -- ) // Print the result with the command line.
py> tib[:ntib].rfind("\n") py> tib[max(pop(),0):ntib].strip() ( result cmd-line )
s" {} {} ({})" :> format(pop(),tos(),type(pop())) . cr ;
/// Good for experiments that need to show command line and the result.
[X] 10:48 2019-05-11 older note
While developing, I don't want to touch the pip-installed peforth (breaking it is a pain), so I want to run from the working folder
instead of having to pip install into site-packages after every change.
[x] Why on earth did __main__.py merely 'import peforth'? The whole point is to run the local version to test the changes!
    --> 15:48 2019-05-11 Probably because the path could not be sorted out, so the problem was simplified (since commit c3d7677 on Oct 8, 2017).
    __main__.py is the entry point when running 'python -m peforth', so it must be taken care of.
    11:26 2019-05-11 while __init__.py is the 'import peforth' entry point.
    --> 11:24 2019-05-11 __main__.py is the entry executed by
    c:\Users\hcche\Documents\GitHub\peforth>python peforth
    and
    c:\Users\hcche\Documents>python -m peforth
see https://www.tuicool.com/articles/iYRfe2
https://stackoverflow.com/questions/44977227/how-to-configure-main-py-init-py-and-setup-py-for-a-basic-package
--> 11:51 2019-05-11 how about to have test.py that does what __main__.py is supposed to do when
running ~GitHub\peforth>python peforth?
--> this is a good idea, but the path in __init__.py will be wrong, deal with it!!
--> Just make __init__.py's path handling also try the places where version.txt might be found. Works.
--> It works; being able to run it directly is good enough, no need to insist on running the peforth directory as in the early days.
    How to run: c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
    __run__.py --> finally named test.py, the most natural choice
    # Tried various approaches; exec(open().read()) is still the closest thing to an include
# from . import __init__
# from __init__ import ok
# import subprocess; subprocess.call("__init__.py", shell=True)
exec(open("__init__.py").read()) # this is like: include __init__.py
ok('\n')
[X] __main__.py still has to use 'import peforth'; otherwise open("__init__.py") would fail with file-not-found right away.
    And test.py of course only runs from the right directory, hence the name test.py ;-D
[x] If a command line such as "python test.py foo bar" is to be supported, then test.py is what is used to tell
    whether we are running "from ipython / jupyter notebook" (see quit.f), so the filename test.py
    cannot be changed; changing it means changing quit.f as well, or improving how quit.f detects ipython.
\ ~~~~~~ quit.f ~~~~~~
\ When in ipython or jupyter notebook the command line is used by
\ ipython already. In jupyter notebook, it looks like:
\
\ vm.commandline ----------------------------------------------------------------------------------.
\ sys.argv[0] --------. |
\ | |
\ V V
\ --------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------
\ c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-4be53345-1ddd-47c2-bef2-5e9801688f3f.json
\ So peforth can't support command line statements for ipython and jupyter notebook.
\ For none ipython cases, I have no better idea than to check sys.argv[0] for '.py'
\ and the likes so far 2019-05-15. See the following code, the filename 'test.py' is
\ fixed-coded here therefore.
\
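A minimal Python sketch of that sys.argv[0] check (illustrative only; the real logic lives in quit.f as Forth):
    import sys
    argv0 = sys.argv[0] if sys.argv else ''
    in_ipython = argv0.endswith('ipykernel_launcher.py') or 'ipython' in argv0.lower()
    ran_as_test_py = argv0.endswith('test.py')      # the fixed-coded filename mentioned above
    accept_command_line = ran_as_test_py and not in_ipython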
[X] The command line is also how .f files outside site-packages get run, for example:
    c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py include ..\playground\misc.f
    c:\Users\hcche\Documents\>python -m peforth include GitHub\peforth\playground\misc.f
    Both lines work.
[x] 18:35 2019-05-09 I forgot how peforth is supposed to be maintained!!!! Where should the changes above go?
    --> Edit directly in the github working directory
    --> But what runs is still the version installed into site-packages, because __main__.py really just does 'import peforth'
        c:\Users\hcche\Documents\GitHub\peforth>python peforth
        16:48 2019-05-11 this early way of running has now become
        c:\Users\hcche\Documents\GitHub\peforth\peforth> python test.py
--> 16:48 2019-05-10 Strange: on the LRV2 OA machine pip list shows peforth 1.21!!
    but python -m peforth runs 1.22; yet right after pip uninstall peforth,
    pip list shows peforth 1.23 (which is correct).
    [x] 16:38 2019-05-22 A similar thing happened on T550 while releasing v1.23: pip uninstall peforth
        did remove the v1.23 installed by python setup.py install, but site-packages still
        contained a v1.22 egg --> one more pip uninstall peforth finally removed it.
    --> A pip-installed peforth has no "egg" in its site-packages path, which is how to tell the two apart. It also
        confirms that pip uninstall does not remove the egg flavor (installed via python setup.py install); it takes
        several rounds of pip uninstall peforth before the older copy's turn comes.
    --> My guess: the 1.23 just installed into site-packages via ~\GitHub\peforth>python setup.py install did not
        overwrite the existing one, because it was installed as an egg whose path differs from the whl install!
[X] After c:\Users\hcche\Documents\GitHub\peforth>python setup.py install
    the effect is indeed similar to pip install --> python -m peforth now works, but the paths differ:
    pip install:  c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\version.txt
    setup.py:     c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth-1.23-py3.6.egg\peforth\version.txt
[X] Although installing into site-packages via ~\GitHub\peforth>python setup.py install lands on a different path, jupyter notebook
    has no problem at all; at most a Kernel > Restart and it takes effect immediately. This fully matches my hope of "installing
    directly from source", sparing pip install and the earlier brute-force setup.bat.
    Conclusion:
    1. Edit the GitHub source code directly (and let GitHub keep every version safe)
    2. pip uninstall peforth to clean out the old copies
    3. c:\Users\hcche\Documents\GitHub\peforth>python setup.py install to install from source
    4. Two ways to run and test:
       a. Jupyter Notebook: just Kernel > Restart and the new version takes effect.
       b. Run c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
    5. repeat
[X] 19:07 2019-05-13 這段 code 在 peforth.f 裡面本應處裡 alias 的新 // help, 但是又有問題
\
\ Redefine // to "replace" alias' help message instead of "append".
\
\ Append if last().help has stack diagram but no help message, otherewise replace.
\ Stack diagram might be unexpectedly given again. That can be resolved by putting
\ complete help message to the original word or use the trick of // dummy and then
\ // again or simply don't give it again in the alias' help message.
\
<py>
'''
m = re.match("(?P<before>.*?)(?P<stackdiagram>\(.*\))(?P<after>.*)", last().help)
if m and (m.groupdict()['before'] + m.groupdict()['after']).strip()=="":
last().help += nexttoken('\\n|\\r');
else:
last().help = nexttoken('\\n|\\r');
'''
</pyV> -indent ' // py: pop().xt=genxt("//",pop(1))
The problem: some words' stack diagrams disappeared from their help!!
    [r Prepare an array of data to compare with rstack in selftest.
    Example: [r 1,2,3 r] [d True d] [p 'word1','word2' p]
    [r...r] section is optional, [d...d] section is the judge.
    --> Commenting it out didn't help! --> 13:34 2019-05-15 caused by the new ( comment ) in misc.f.
    --> 19:15 2019-05-15 Simply gave up on having (comment) go into help automatically; use // for help.
        (comment) is changed to the nested version directly, which is better. v1.23
[X] 14:06 2019-05-15 I now think the original (comment) is not as good as the nested (comment) in my gist words4jupyter.py.
    Why keep such a hard-to-understand (comment) just to get the stack diagram into last.help; // is enough!
[X] 16:39 2019-05-16 The original // kept assuming a preceding (comment) had already gone into help, so it used +=;
    no wonder this problem existed. Drop that and simply use last().help = nexttoken('\n|\r'); v1.23
\ to be
code ( # ( <str> -- ) // Comment down to ')' which can be nested if balanced
nextstring('\(|\)')['str'] # skip TIB to the next delimiter
cc = tib[ntib] # cc must be delimiter '(', ')', or '\n'
vm.ntib+=1 # skip any of them
if cc=='(':
execute(_me) # recursion of (
execute(_me) # recursion of )
end-code immediate
\ was
code ( # ( <stack diagram> -- ) Get stack diagram to the last's help.
a = nexttoken('\\)')
b = nexttoken() # the ')'
    if compiling and last().help=="": # skip if help already exists
last().help = '( ' + a + b + ' '
end-code immediate
/// Nested not allowed yet.
[X] Values & constants deleted via marker leave garbage behind in vm[context]
    that never gets reclaimed! marker needs strengthening, and forget needs attention too.
    --> 123 value x char abc value ss vm.forth dict>keys -->
        dict_keys(['CRLF', 'obj2dict', '_locals_', 'debug', 'screen-buffer',
        'description', 'expected_rstack', 'expected_stack', 'test-result',
        '[all-pass]', 'xxx', 'x', 'y', 'ss'])
                                  ^^^  ^^^^ still present in vm.forth
    --> run marker --> x and ss are gone from words, of course --> but they still exist in vm.forth, piling up!!
        v1.23 still has this problem and I don't know what to do about it . . .
    FP, see below 2020/07/27 08:38:15: value, constant and to need to be redefined . . .
[X] 改寫所有的 code words 把彆扭的 help 用新的 # 功能改自然點。
done! v1.23
[X] quit.f 裡的怪東西都不要了 --> inport, outport, harry_port v1.23
[X] 把 gist 上的東西 include 進來,最主要的是有 support nesting 的 (comment) v1.23
[X] 取消 colon definition 中第一個 ( ... ) 的作用,只用 // 即可留 help
--> 唉,試了就知道,很醜! v1.23 真的實現了
Notepad++ ^h replace regular expression
Find what: "(\(\s+.*\))\s+(//)"
Replace with: "// \1"
[x] dos , cd 太重要了,從 misc.f 移進 peforth.f
[X] 17:53 2019-05-11 接下來考慮出 v1.23 版。
[X] complete self-tests for new words , many are commented out.
[X] 評估 misc.f unknown.f quit.f 的內容要怎麼分配 --> 全部放進 misc.f 加個 marker 全自動 load 進去。
--> 不要的人只要跑一下 marker 就可以全清掉。
--> 這些東西的 self-test 就要自己做,不能放 peforth.selftest 裡。
[X] peforth.f source code 裡還有很多中文
[X] 好像 *debug* 出不來....
--> 喔喔 是給 breakpoint 用的 exit 出的問題。
--> 趁放進 misc.f 的機會給它改名吧! quit
[X] 測試 jupyter notebook
[x] established the method to include misc.f from within quit.f
[X] 測試 ipython (DOS box)
[X] After entering ipython, import peforth looks fine (the self-test is all ok), but from then on ipython can no longer print anything.
    Running ipython -m peforth behaves the same.
    --> ipython's own display got switched off as well; running peforth.dictate('display-on') restores it.
    --> Caused by the selftest's display-off? --> comment the self-test out and try... really fixed!
        Even ipython -m peforth is fine now.
    --> Running c:\Users\hcche\Documents\GitHub\peforth\peforth>ipython test.py: the self-test and everything after it are fine.
[X] self-test on > reproduce the problem > then issue display-on, and it is cured! That seemed to confirm display-off as the root cause. (It turned out later: wrong, not so!)
    but where? --> Commenting misc.f out of quit.f also fixes it, so the problem is inside misc.f. --> Found it: just stop before pyclude's
    self-test --> So how does display-off work? --> display-on merely resets sys.stdout,
    nothing to fault there. Fine, having a workaround is good enough.
[x] WSL Ubuntu 之下 display-off 之後的斷點 *debug* 也怪怪的,本想在其中下 display-on 再回來繼續,
結果一 exit 回來就回 Shell 了。試過 time delay 如下也無效。
<py>
# 拖時間
factorial = 1
for i in range(2,10000):
factorial = factorial * i
</py>
[X] 15:24 2019-05-22 Damn! The same problem appears under Windows DOS too. SRP: the difference is the working directory
        problem:     c:\Users\hcche\Documents\GitHub\peforth>python -m peforth
        no problem:  c:\Users\hcche\Documents\GitHub\peforth\peforth>python -m peforth
    It happens before *** (pyclude) --> deliberately do a display-off / display-on first and see....
    RI: Bingo! Shit! pyclude hello.py in the selftest must be run with its own location as the working directory
    [x] 15:55 2019-05-22 This root cause was found by patiently walking the v1.23 release check-list, so
        that check-list really is worth doing properly.
[/] 測試 ubuntu 的 ipython ---> 放棄,error message 如下:
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ ipython
Command 'ipython' not found, but can be installed with:
sudo apt install ipython
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ sudo apt install ipython
[sudo] password for hcchen:
Reading package lists... Done
Building dependency tree
Reading state information... Done
Package ipython is not available, but is referred to by another package.
This may mean that the package is missing, has been obsoleted, or
is only available from another source
E: Package 'ipython' has no installation candidate
[X] Error! tib. unknown! --> changed to "-->" in 1.23
[X] Rewrote readme.rst on pypi.org; the original example is no longer great, pdb is actually quite capable.
    Better to use an Azure notebook to introduce ipython magic commands instead.
[X] 17:33 2019-05-19 Updated README.md on Github.com and the local .rst
[X] In jupyter notebook, plain 'import peforth' is already very handy;
    removed the peforth kernel (not in readme.md anyway) and moved it into the Wiki.
    --> 17:34 2019-05-19 done
[X] Added misc.f, hello.py, etc. into the package
[X] Testing ubuntu shows cd needs to go into peforth.f while dos should stay in misc.f, and the OS has to be detected.
    --> py> os.name . cr ( posix or nt )
[X] v1.23 測試 ubuntu --> 靠!都忘了怎麼測試了,可以不經過 pip 版嗎?
09:35 2019-05-22
--> T550 ubuntu 16.04 連 pip 都沒有, python 版本也搞不清, 更不用說 virtualenv 了。
--> 感覺用 Linux 很恐慌,乾脆把 T550 上的 Ubuntu 16.04 remove 掉,改用新版的,希望可以避開
python 版本的問題。(See Ynote:"[筆記] Install Mozilla DeepSpeech Project" > "wsl ubuntu install python3.6.txt")
--> 09:39 2019-05-22 T550 Ubuntu removed --> The recent is still 18.04 on
Microsoft Store, so be it --> 10:43 2019-05-22 WSL installed
--> how's the built-in python? --> See Ynote "好久沒玩 WSL Ubuntu, 為了 release peforth v1.23 測試整個再玩一次"
[X] 有 python 3.6.5 built-in 沒有 pip <--- 先不管它,只測 python test.py 過了再說。 --> 一番折騰,過了!
[/] 沒有 pip 可以 python -m peforth 嗎? 試試 python setup.py install 結果失敗
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ python setup.py install
Traceback (most recent call last):
File "setup.py", line 4, in <module>
from setuptools import setup
ModuleNotFoundError: No module named 'setuptools' <------------------
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$
[X] 看了這篇 https://askubuntu.com/questions/861265/python-3-importerror-no-module-named-setuptools-ubuntu-14-04-lts
決定放棄,有測過 test.py 就好了。
[/] 即使上了 pypi.org 也還需要 pip (但 18.04 default 沒有), 不管了,有測過 test.py 就好了。
[/] 上了 pypi.org 之後,再用 Azure Notebooks 測試。
[X] Tests before a Release v1.23
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[X] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 1. python -i -m peforth [/] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[X] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[/] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[X] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[X] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[X] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[x] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[X] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[X] pip uninstall peforth
[X] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[X] 1. (i)python -i -m peforth [/] no-selftest .s words exit
[X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[X] 3. (i)python import peforth
[X] no selftest, peforth.ok() .s words <--- no parent
[X] 1234 bye check echo %errorlevel%
[X] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[/] pip uninstall peforth
[/] pip install (use /mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[X] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
[X] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[/] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[/] WSL Ubuntu with and w/o virtualenv --> python -m peforth
[X] Windows DOS 下試
[X] Test Azure Online Jupyter Notebooks
https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
[X] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.24 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[X] 11:28 2019-05-26 make a master merge for the article of Febenacci and Decorator
[X] rename the article to 'peforth helps to understand python Decorator'
[/] 11:35 2019-05-26 write an article to introduce 'unknown'
--> forget this, covered already.
[/] 11:35 2019-05-26 find the video I introduce 'unknown' and the other thing
--> forget this, covered already.
[X] 09:11 2019-11-21 本來跑 GitHub\peforth\setup.bat 讓改好的新版生效,在 anaconda 之下還行嗎?
1. 跑 anaconda's prompt make sure python runable
2. peforth runable too, check path
3. cd to GitHub\peforth run setup
4. check peforth
OneNote 筆記:
"Develop peforth in an Anaconda virtual environment"
https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
[X] 16:54 2019-07-22 從這裡 https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html 讀到
有從 github repo 上直接 pip install 的方法,e.g.:
pip install https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tarball/master
Try whether peforth can be installed that way; if so, there is no need to go through pypi:
    pip install https://github.com/hcchengithub/peforth/master
    or
    pip install https://github.com/hcchengithub/peforth
    ==> both failed
[ ] GitHub 有開始做 package hosting 了:
https://help.github.com/en/github/managing-packages-with-github-packages/about-github-packages#supported-clients-and-formats
[X] 2019/11/24 06:10:22 projectk.py imports many modules it does not use itself (it only
    uses re, the regular expression module). My comment there says:
    import re # import whatever we want, don't rely on parent module e.g. peforth __init__.py
    which has a point, because the projectk.py kernel has its own space. Still, modules are supposed to be global,
    aren't they? Can't they be imported from forth code? --> Just try it: comment out the redundant imports in
    projectk.py --> problems only show up very late, and as long as they are native modules there is a chance to fix them.....
    Hints taken from 'help import':
\ import os __main__ :: peforth.projectk.os=pop(1) \ peforth global , does not work when run by 'python test.py'
import os py> vm :: os=pop(1) \ this works! when run by 'python test.py'
import inspect py> vm :: inspect=pop(1)
import dis py> vm :: dis=pop(1)
import json py> vm :: json=pop(1)
    But sys is too fundamental; it must be imported inside projectk.py.
[X] setup.py 裡的 copy right 年份要改成 2019
[/] 2019/11/24 05:20 用 Anaconda 之後似乎 kernel.json 也有問題?
裡面描述的 peforthkernel.py path 是寫死的,在我 OA、Anaconda 上就不對了。
好像只要無意把 peforth 加進 JupyterNotebook 的 kernel 就沒問題。
[X] 05:29 2019-11-21 The three global tokens local, Comment and debug in projectk.py look redundant;
    review them when there is time.
    [X] local might be used by ok(prompt='OK ', loc={}, glo={}, cmd="") or by the redefined unknown <--- it is not
    13:47 2019/11/25 delete all suspected things from projectk.py --> dos ok, jupyternotebook ok.
* Note! setup.bat does not update the peforth\ folder in site-packages; you have to manually copy peforth\ out of peforth-1.24-py3.7.egg <== solved 2020.7.28! see OneNote2020 > "Develop peforth in an Anaconda virtual environment"
    to overwrite the peforth\ folder in site-packages (see the sketch right below for a quick check of which copy is live).
    refer to https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
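A quick Python check of which peforth copy actually gets imported (the second line assumes the vm.path attribute set in __init__.py as noted earlier; treat it as illustrative):
    import peforth
    print(peforth.__file__)        # which site-packages copy (egg or whl layout) was picked up
    print(peforth.vm.path)         # where version.txt was found at start-up, per __init__.py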
[X] 14:41 2019/11/25 quit.f 裡這種東西應該要改良,太笨了:
import os py> vm :: os=pop(1) \ 太笨
import os \ 應該改良成這樣
[X] 15:21 2019/11/25 整理 peforth.f quit.f peforth.selftest 的關係,更有系統了。
__init__.py 只 load 進基本的 peforth.f quit.f 其他的都由 quit.f 負責,使 quit.f
成為 eforth 系統的 main program, 統籌者。
[X] 16:07 2019/11/25 Understand pop(1) once and for all
    code test # ( a b c -- ) print given things
    print(pop(), pop(), pop()) end-code
    1 2 3 test
    3 2 1 <-- the result: the three pop() calls grab TOS from left to right (see the sketch below).
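A minimal Python sketch of why that ordering comes out (a toy stack, purely illustrative):
    stack = []
    def push(x): stack.append(x)
    def pop(n=0): return stack.pop(-1 - n)   # pop(0) is TOS, pop(1) the item below it
    push(1); push(2); push(3)
    print(pop(), pop(), pop())               # arguments evaluate left to right: prints 3 2 1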
[X] Tests before a Release v1.24
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[X] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[X] 1. python -i -m peforth [X] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[X] 3. ipython import peforth .s words
[X] selftest peforth.ok() .s words <--- w/parent
[X] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[X] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[X] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[X] 同上 python test.py 先試試看
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[X] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[X] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[X] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[X] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[X] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[X] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[X] 1. (i)python -i -m peforth [X] no-selftest .s words exit
[X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[X] 3. (i)python import peforth
[X] no selftest, peforth.ok() .s words <--- no parent
[X] 1234 bye check echo %errorlevel%
[X] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[/] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[/] pip install (use /mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[X] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[X] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[X] Windows DOS 下試
[/] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[/] WSL Ubuntu with and w/o virtualenv --> python -m peforth
[X] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[x] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[X] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[X] notebooks.ai 也測測看
[X] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.25 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[X] Should projectk.py be synced back to project-k?
    (long ago) projectk.py was changed a little, and I forgot how it was synced back to project-k.
    05:30 2019-11-21 The projectk.py in the peforth source code was not taken from github directly; it was
    put there by hand, so it does not sync with the project-k github automatically <--- think about what to do.
[ ] 15:56 2019/11/25 (see) very often hits this problem:
    Callable in phaseB <function compyle_anonymous at 0x00000232B1164A68>: Circular reference detected
    Ask around whether there is a workaround.
[ ] 13:42 2019/11/27 For the next release, list the tested systems explicitly in README.rst / README.md:
    1. Windows Anaconda DOSBox python 3.7, DOSBox ipython, JupyterNotebook, JupyterLab
    2. Colab (Ubuntu,Anaconda), Azure notebooks (Ubuntu), Notebooks.ai (Debian)
[X] 2020/07/27 08:33 [obj>keys] 'keys' can be defined as dir | dict>keys so it no longer duplicates dir, and it stays compatible with jeforth.
[X] 2020/07/27 08:38:15 value, constant and to need to be redefined: stop storing them in vm.forth and let each word keep its own variable.
    See OneNote2020 > "Jeforth variable 變革" --> done, it works.
[ ] Consider putting projectk.py itself on pypi as well; pip install projectk would make it even more meaningful!
[X] 07:49 2020/10/04 Studying KsanaVM showed I had misunderstood when the prompt should appear; fixed.
[ ] 15:49 2020/10/24 After v1.25 is done, sync projectk.py back to projectk
[X] 15:30 2020/10/24 Prepare to release v1.25 to pypi so as to allow gom to have it easily
[X] 15:54 2020/10/24 Try gom first, ok? --> Pass, even the selftest passes.
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 若無 twine 則 pip install twine 很快很順
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[ ] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
16:27 2020/10/24 No need for that, because the peforth v1.25 installed by python setup.py install is
    c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
    while pip install peforth installs another peforth v1.25 at
    c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
    The two can coexist, and the latter takes precedence. Renaming the latter directory to peforth.disabled
    makes the former take effect; the former is handy for local-install testing.
[X] 1. python -i -m peforth [X] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[/] 3. ipython import peforth .s words
[/] selftest peforth.ok() .s words <--- w/parent
[/] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[X] 5. jupyter notebook --> peforth kernel --> .s words
[X] 6. Gom 手動移除現有的 peforth directories from:
c:\Users\8304018\AppData\Roaming\gom\2020\python\..
然後從 SCRIPTING > Script Choice > pip install peforth > Tools > Install Python Package 灌 peforth 很快很順
import peforth, peforth_gom_port
執行 peforth.ok() 無誤。
新增 peforth_gom_port.py 放到 peforth repo 的 playground directory 裡。
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[ ] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[ ] 同上 python test.py 先試試看
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[ ] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[ ] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[ ] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[ ] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[ ] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[ ] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
[ ] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[ ] 3. (i)python import peforth
[ ] no selftest, peforth.ok() .s words <--- no parent
[ ] 1234 bye check echo %errorlevel%
[ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[ ] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[ ] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[ ] pip install (use /mnt/...the wheel) to WSL ubuntu
[ ] ipython -m peforth
[ ] ipython , import peforth , magic commands
[ ] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[ ] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[ ] Windows DOS 下試
[ ] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[ ] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[ ] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[ ] notebooks.ai 也測測看
[ ] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.25 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[ ] 要不要把 projectk.py sync 回 project-k
(很早以前) projectk.py 改了一點,忘了如何 sync 回 project-k 的?
05:30 2019-11-21 peforth source code 裡的 projectk.py 本身不是從 github 直接下來的, 而是
硬放上去的,因此不會與 project-k github 自動同步 <--- 想想看怎麼辦。
[X] 17:01 2020/10/24 v1.25 已經上了 pypi 也測過 Gom 成功,以上測試慢慢做,先上 github 再說。
[X] 14:22 2020/10/29 vm.prompt exists so the gom port dialog can know the current prompt; otherwise it only lives inside ok().
[X] 13:52 2020/11/23 Replace the pypi v1.25 directly with the local v1.26
    --> just copy __init__.py and version.txt over c:\Users\8304018\AppData\Roaming\gom\2020\python\peforth
    --> 11> <py> ok() </py> --> the prompt becomes ok ; exit --> the prompt goes back to 11> ; success! So this is indeed v1.26.
[X] 10:26 2020/11/26 Improved breakpoints without touching peforth; they can be hooked in from the application side.
Usage of breakpoint:
peforth.bp(22,locals()) # drop breakpoint 22 with locals()
for i in [11,22,33]: peforth.bps[i]=0 # disable breakpoints 11,22,33
for i in [11,22,33]: peforth.bps[i]=i # enable breakpoints 11,22,33
peforth.bps=[i for i in range(1000)] # reload and enable all breakpoints
'exit' or ESC leaves the breakpoint and continue running.
'bye' to totally stop the script session.
# breakpoint
# peforth.bp() # drop a breakpoint using default prompt bp>
# peforth.bp(11) # drop a breakpoint using prompt bp11> w/p passing locals()
# peforth.bp(22,locals()) # drop a breakpoint using prompt bp22> with locals()
# peforth.bps=[] # disable all breakpoints
# peforth.dictate("peforth :: bps=[]") # disable all breakpoints
# peforth.dictate("peforth :: bps=[123,345,567]") # enable only listed breakpoints
# peforth.dictate("peforth :: bps[123]=0") # disable the breakpoint 123
# peforth.dictate("peforth :: pop(111)") # disable the breakpoint 111
# for i in [11,22,33]: peforth.bps[i]=0 # disable breakpoints 11,22,33
# peforth.bps=[i for i in range(1000)] # reload and enable all breakpoints
def bp(id=None,locals=None):
if id==None:
id = 0
prompt='bp> '
else:
prompt="bp{}>".format(id)
if id in peforth.bps: peforth.push(locals).ok(prompt, cmd="to _locals_")
peforth.bp = bp
peforth.bps = [i for i in range(1000)]
[X] 17:33 2020/12/07 Added the bl, be, bd, be*, bd* commands to go with peforth.bp(22,locals())
[ ] 17:34 2020/12/07 release v1.26 to pypi
[X] 17:37 2020/12/07 Try gom first, ok? --> Pass, even the selftest passes.
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 若無 twine 則 pip install twine 很快很順
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[ ] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[ ] 所有 run 法帶 selftest:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[ ] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[ ] 要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
16:27 2020/10/24 不必這樣,因為 python setup.py install 灌好的 peforth v1.25 是
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
而 pip install peforth 灌好的是另一個 peforth v1.25
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
兩個可以並存!而且後者優先。只要把後者 directory name 改成 peforth.disabled
就可以讓前者生效,前者是 local install 測試時有其方便性。
[ ] 1. python -i -m peforth [X] with-selftest .s words exit bye
[ ] 2. ipython -i -m peforth .' Hello World!!' cr bye
[ ] 3. ipython import peforth .s words
[/] selftest peforth.ok() .s words <--- w/parent
[/] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[ ] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[ ] 5. jupyter notebook --> peforth kernel --> .s words
[ ] 6. Gom 手動移除現有的 peforth directories from:
c:\Users\8304018\AppData\Roaming\gom\2020\python\..
然後從 SCRIPTING > Script Choice > pip install peforth > Tools > Install Python Package 灌 peforth 很快很順
import peforth, peforth_gom_port
執行 peforth.ok() 無誤。
新增 peforth_gom_port.py 放到 peforth repo 的 playground directory 裡。
[ ] 考慮 README.rst 改良
[ ] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[ ] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[ ] 同上 python test.py 先試試看
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[ ] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[ ] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[ ] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[ ] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[ ] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[ ] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
[ ] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[ ] 3. (i)python import peforth
[ ] no selftest, peforth.ok() .s words <--- no parent
[ ] 1234 bye check echo %errorlevel%
[ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[ ] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[ ] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[ ] pip install (use /mnt/...the wheel) to WSL ubuntu
[ ] ipython -m peforth
[ ] ipython , import peforth , magic commands
[ ] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[ ] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[ ] Windows DOS 下試
[ ] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[ ] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[ ] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[ ] notebooks.ai 也測測看
[ ] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[ ] 要不要把 projectk.py sync 回 project-k
(很早以前) projectk.py 改了一點,忘了如何 sync 回 project-k 的?
05:30 2019-11-21 peforth source code 裡的 projectk.py 本身不是從 github 直接下來的, 而是
硬放上去的,因此不會與 project-k github 自動同步 <--- 想想看怎麼辦。
[X] version 改成 1.27 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
| mit |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/scripts/build_coicop_legislation.py | 4 | 23938 | # -*- coding: utf-8 -*-
import numpy as np
import os
import pandas as pd
import pkg_resources
import build_coicop_nomenclature
legislation_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location,
'openfisca_france_indirect_taxation',
'assets',
'legislation',
)
sub_levels = ['divisions', 'groupes', 'classes', 'sous_classes', 'postes']
divisions = ['0{}'.format(i) for i in range(1, 10)] + ['11', '12'] # TODO: fix this
taxe_by_categorie_fiscale_number = {
0: '',
1: 'tva_taux_super_reduit',
2: 'tva_taux_reduit',
3: 'tva_taux_plein',
4: 'tva_taux_intermediaire',
7: 'cigarettes',
8: 'cigares',
9: 'tabac_a_rouler',
10: 'alcools_forts',
11: 'tva_taux_plein',
12: 'vin',
13: 'biere',
14: 'ticpe',
15: 'assurance_transport',
16: 'assurance_sante',
17: 'autres_assurances'
}
def extract_informations_from_coicop_to_categorie_fiscale():
def format_exceptions(exceptions):
grouped = exceptions.groupby(
by = [exceptions.annee - np.arange(exceptions.shape[0]), 'posteCOICOP', 'categoriefiscale']
)
for k, g in grouped:
print g.posteCOICOP.unique()[0], g.description.unique()[0], g.annee.min(), g.annee.max(), \
taxe_by_categorie_fiscale_number[int(g.categoriefiscale.unique())]
def get_dominant_and_exceptions(division):
assert division in divisions
parametres_fiscalite_file_path = os.path.join(legislation_directory, 'coicop_to_categorie_fiscale.csv')
parametres_fiscalite_data_frame = pd.read_csv(
parametres_fiscalite_file_path,
converters = {'posteCOICOP': unicode}
)
parametres_fiscalite_data_frame['division'] = parametres_fiscalite_data_frame['posteCOICOP'].str[:2].copy()
division_dataframe = parametres_fiscalite_data_frame.query('division == @division')
dominant_fiscal_category = division_dataframe.categoriefiscale.value_counts().argmax()
exceptions = division_dataframe.query('categoriefiscale != @dominant_fiscal_category')
return dominant_fiscal_category, exceptions
for coicop_division in divisions:
dominant, exceptions = get_dominant_and_exceptions(coicop_division)
print u'\nDivision: {}.\nCatégorie fiscale dominante: {}.\nExceptions:'.format(
coicop_division,
taxe_by_categorie_fiscale_number[dominant]
)
format_exceptions(exceptions)
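# Descriptive note: extract_infra_labels_from_coicop_code infers the parent-level labels
# (division/groupe/classe/sous_classe) of a COICOP code from existing rows sharing the same code
# prefix, then overrides the label of the deepest level with the label passed in.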
def extract_infra_labels_from_coicop_code(coicop_nomenclature = None, coicop_code = None, label = None):
assert coicop_nomenclature is not None
assert coicop_code is not None
assert label is not None
known_levels = sub_levels[:len(coicop_code.split('.')) - 1]
coicop_sub_code = coicop_code[:len(coicop_code) - 2]
assert known_levels, 'No know levels for COICOP {}'.format(coicop_code)
labels_by_sub_level = coicop_nomenclature.loc[
coicop_nomenclature.code_coicop.str[:len(coicop_sub_code)] == coicop_sub_code,
['label_{}'.format(level[:-1]) for level in known_levels]
].drop_duplicates().dropna().to_dict(orient = 'records')[0]
for key, value in labels_by_sub_level.iteritems():
labels_by_sub_level[key] = value
modified_level = sub_levels[len(coicop_code.split('.')) - 1][:-1]
labels_by_sub_level['label_{}'.format(modified_level)] = label
return labels_by_sub_level
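# Descriptive note: apply_modification assigns categorie_fiscale to every COICOP code matched by
# `value` (a division number, a code-prefix string, or a list of codes) over the period [start, stop].
# When the period only partially overlaps an existing row, the row is split so that each sub-period
# keeps its own fiscal category; when the code does not exist yet, a new row is appended using the
# labels inferred by extract_infra_labels_from_coicop_code.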
def apply_modification(coicop_nomenclature = None, value = None, categorie_fiscale = None,
origin = None, start = 1994, stop = 2014, label = ''):
assert coicop_nomenclature is not None
assert categorie_fiscale in taxe_by_categorie_fiscale_number.values()
assert 1994 <= start < stop <= 2014, "Invalid start={} and/or stop={}".format(start, stop)
if isinstance(value, int):
value_str = '0' + str(value) if value < 10 else str(value)
selection = coicop_nomenclature.code_coicop.str[:2] == value_str
elif isinstance(value, str):
selection = coicop_nomenclature.code_coicop.str[:len(value)] == value
elif isinstance(value, list):
selection = coicop_nomenclature.code_coicop.isin(value)
if selection.any(): # la coicop existe
filled_start_stop = (
(coicop_nomenclature.loc[selection, 'start'].unique() != 0).any() or
(coicop_nomenclature.loc[selection, 'stop'].unique() != 0).any()
)
if not filled_start_stop:
coicop_nomenclature.loc[selection, 'start'] = 1994
coicop_nomenclature.loc[selection, 'stop'] = 2014
coicop_nomenclature.loc[selection, 'categorie_fiscale'] = categorie_fiscale
else:
equal_start = coicop_nomenclature.start == start
equal_stop = coicop_nomenclature.stop == stop
selection_bis = selection & (coicop_nomenclature.start <= start) & (coicop_nomenclature.stop >= stop)
if (selection_bis & equal_start & equal_stop).any(): # meme intervalle
coicop_nomenclature.loc[
selection_bis & equal_start & equal_stop, 'categorie_fiscale'
] = categorie_fiscale
elif (selection_bis & equal_start).any(): # recouvrement au debut
coicop_nomenclature.loc[selection_bis & equal_start, 'start'] = stop + 1
coicop_copy = coicop_nomenclature.loc[selection_bis & equal_start].copy()
coicop_copy['categorie_fiscale'] = categorie_fiscale
coicop_copy['start'] = start
coicop_copy['stop'] = stop
coicop_nomenclature = coicop_nomenclature.append(coicop_copy)
coicop_nomenclature.reset_index(inplace = True, drop = True)
coicop_nomenclature.sort_values(by = 'code_coicop', inplace = True)
elif (selection_bis & equal_stop).any(): # recouvrement a la fin
coicop_nomenclature.loc[selection_bis & equal_stop, 'stop'] = start - 1
coicop_copy = coicop_nomenclature.loc[selection_bis & equal_stop].copy()
coicop_copy['categorie_fiscale'] = categorie_fiscale
coicop_copy['start'] = start
coicop_copy['stop'] = stop
coicop_nomenclature = coicop_nomenclature.append(coicop_copy)
coicop_nomenclature.reset_index(inplace = True, drop = True)
coicop_nomenclature.sort_values(by = 'code_coicop', inplace = True)
else: # recouvrement au milieu sans affecter les extermités
coicop_copy_inf = coicop_nomenclature.loc[selection_bis].copy()
coicop_copy_inf['stop'] = start - 1
coicop_copy_sup = coicop_nomenclature.loc[selection_bis].copy()
coicop_copy_sup['start'] = stop + 1
coicop_nomenclature.loc[selection_bis, 'categorie_fiscale'] = categorie_fiscale
coicop_nomenclature.loc[selection_bis, 'start'] = start
coicop_nomenclature.loc[selection_bis, 'stop'] = stop
coicop_nomenclature = coicop_nomenclature.append(coicop_copy_inf)
coicop_nomenclature = coicop_nomenclature.append(coicop_copy_sup)
coicop_nomenclature.reset_index(inplace = True, drop = True)
coicop_nomenclature.sort_values(by = 'code_coicop', inplace = True)
else:
assert origin is not None
assert label is not None
infra_labels = extract_infra_labels_from_coicop_code(coicop_nomenclature, str(value), label)
additional_row = pd.DataFrame(columns = coicop_nomenclature.columns)
additional_dict = {
'code_coicop': str(value),
'categorie_fiscale': categorie_fiscale,
'start': start,
'stop': stop,
'origin': origin
}
additional_dict.update(infra_labels)
for item, val in additional_dict.iteritems():
additional_row[item] = [val]
coicop_nomenclature = coicop_nomenclature.append(additional_row)
coicop_nomenclature.reset_index(inplace = True, drop = True)
coicop_nomenclature.sort_values(by = 'code_coicop', inplace = True)
# print coicop_nomenclature.categorie_fiscale.value_counts()
return coicop_nomenclature
def build_coicop_nomenclature_with_fiscal_categories(to_csv = False):
coicop_nomenclature = build_coicop_nomenclature.build_complete_coicop_nomenclature()
# On ajoute des colonnes
# période d'effet de la législation
coicop_nomenclature['start'] = 0
coicop_nomenclature['stop'] = 0
# origine de du poste
coicop_nomenclature['origin'] = 'COICOP INSEE'
# 01 Produits alimentaires et boissons non alcoolisées
# ils sont tous à taux réduit
alimentation = dict(
value = 1,
categorie_fiscale = 'tva_taux_reduit'
)
# sauf la margarine à taux plein
margarine = dict(
value = '01.1.5.2.2',
categorie_fiscale = 'tva_taux_plein',
)
saindoux = dict(
value = '01.1.5.2.3',
categorie_fiscale = 'tva_taux_reduit',
label = "Saindoux autres graisses d'origine animale",
origin = 'TAXIPP',
)
# et les confiseries et le chocolat à taux plein http://bofip.impots.gouv.fr/bofip/1438-PGP.html
confiserie = dict(
value = ['01.1.8.1.3', '01.1.8.2.1', '01.1.8.2.2'],
categorie_fiscale = 'tva_taux_plein'
)
# 02 Boissons alcoolisées et tabac
# alccols forts
alcools = dict(
value = '02.1.1',
categorie_fiscale = 'alcools_forts',
)
# vins et boissons fermentées
vin = dict(
value = '02.1.2',
categorie_fiscale = 'vin',
)
# bière
biere = dict(
value = '02.1.3',
categorie_fiscale = 'biere',
)
# tabac
cigares = dict(
value = '02.2.1',
categorie_fiscale = 'cigares',
label = 'Cigares et cigarillos',
origin = 'TAXIPP',
)
cigarettes = dict(
value = '02.2.2',
categorie_fiscale = 'cigarettes',
label = 'Cigarettes',
origin = 'TAXIPP',
)
tabac_a_rouler = dict(
value = '02.2.3',
categorie_fiscale = 'tabac_a_rouler',
label = 'Tabac a rouler', # TODO je n'arrive aps à mettre des accents
origin = 'TAXIPP',
)
stupefiants = dict(
value = '02.3',
categorie_fiscale = '',
label = 'Stupefiants',
origin = 'COICOP UN',
)
# 03 Habillement et chaussures
habillement = dict(
value = 3,
categorie_fiscale = 'tva_taux_plein'
)
# 04 Logement, eau, gaz, électricité et autres combustibles
logement = dict(
value = 4,
categorie_fiscale = 'tva_taux_plein',
)
# sauf distribution d'eau, enlèvement des ordures ménagères, assainissement, autres services liés au logement n.d.a.
# qui sont au taux réduit de 1994 à 2011
eau_ordures_assainissement = dict(
value = ['04.4.1.1.1', '04.4.1.2.1', '04.4.1.3.1'],
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
# avant de passer au taux intermédiaire
eau_ordures_assainissement_reforme_2012 = dict(
value = ['04.4.1.1.1', '04.4.1.2.1', '04.4.1.3.1'],
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# et pas de taxation des loyers
loyers = dict(
value = ['04.1.1.1.1', '04.1.1.2.1'],
categorie_fiscale = '',
)
# TODO ajouter loyers fictifs
# 05 Ameublement, équipement ménager et entretien courant de la maison
ameublement = dict(
value = 5,
categorie_fiscale = 'tva_taux_plein',
)
# sauf Services domestiques et autres services pour l'habitation
services_domestiques = dict(
value = '05.6.2',
categorie_fiscale = 'tva_taux_reduit',
)
# 06 Santé pas taxée
sante = dict(
value = 6,
categorie_fiscale = '',
)
# sauf pharmacie
pharmacie = dict(
value = '06.1.1.1',
categorie_fiscale = 'tva_taux_super_reduit',
)
# parapharmacie
parapharmacie = dict(
value = '06.1.1.2',
categorie_fiscale = 'tva_taux_plein',
)
# materiel therapeutique
materiel_therapeutique = dict(
value = '06.1.1.3',
categorie_fiscale = 'tva_taux_reduit',
)
# 07 Transports
transports = dict(
value = 7,
categorie_fiscale = 'tva_taux_plein',
)
# Transport combine de passagers change en 2012 #
transport_combine_passagers = dict(
value = '07.3.5',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
transport_combine_passagers_reforme_2012 = dict(
value = '07.3.5',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Transport maritime et fluvial de passagers change en 2011 Attention 07.3.4 dans enquête BDF
transport_maritime = dict(
value = '07.3.6',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
transport_maritime_reforme_2012 = dict(
value = '07.3.6',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Transport aérien de passagers change en 2012
transport_aerien = dict(
value = '07.3.3',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
transport_aerien_reforme_2012 = dict(
value = '07.3.3',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Transport routier de passagers 1994 2011 change en 2012
transport_routier = dict(
value = '07.3.2',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
transport_routier_reforme_2012 = dict(
value = '07.3.2',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Transport ferroviaire de passagers change en 2012
transport_ferroviaire = dict(
value = '07.3.1',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
transport_ferroviaire_reforme_2012 = dict(
value = '07.3.1',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Fuels and lubricants for personal vehicles, 1994-2014
carburants_lubrifiants = dict(
value = '07.2.2',
categorie_fiscale = 'ticpe',
)
# 08 Communications
communications = dict(
value = 8,
categorie_fiscale = 'tva_taux_plein',
)
services_postaux = dict(
value = '08.1.1.1',
categorie_fiscale = '',
)
# 09 Recreation and culture
loisirs_cuture = dict(
value = 9,
categorie_fiscale = 'tva_taux_plein',
)
# Newspapers and periodicals
journaux_periodiques = dict(
value = '09.5.2',
categorie_fiscale = 'tva_taux_super_reduit',
)
# Books
livre = dict(
value = '09.5.1',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
livre_reforme_2012 = dict(
value = '09.5.1',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Games of chance
jeux_hasard = dict(
value = '09.4.3',
categorie_fiscale = '',
label = 'Jeux de hasard',
origin = 'COICOP UN',
)
# Cultural services
services_culturels = dict(
value = '09.4.2',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
# Cultural services, from 2012
services_culturels_reforme_2012 = dict(
value = '09.4.2',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Recreational and sporting services
services_recreatifs_sportifs = dict(
value = '09.4.1',
categorie_fiscale = 'tva_taux_reduit',
stop = 2011,
)
services_recreatifs_sportifs_reforme_2012 = dict(
value = '09.4.1',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# 10 Education
education = dict(
value = 10,
categorie_fiscale = '',
)
# 11 Hotels and restaurants
hotellerie_restauration = dict(
value = 11,
categorie_fiscale = 'tva_taux_reduit',
)
# Consumption of alcoholic beverages
consommation_boissons_alcoolisees = dict(
value = ['11.1.1.2.2', '11.1.1.2.3', '11.1.1.2.4'],
categorie_fiscale = 'tva_taux_plein',  # TODO: except in Corsica, where the rate is 10%
)
# On-site catering, 1994-2009
restauration_sur_place = dict(
value = '11.1.1.1.1',
categorie_fiscale = 'tva_taux_plein',
stop = 2009,
)
restauration_sur_place_reforme_2010 = dict(
value = '11.1.1.1.1',
categorie_fiscale = 'tva_taux_reduit',
start = 2010,
stop = 2011,
)
# On-site catering, 2012-2014
restauration_sur_place_reforme_2012 = dict(
value = '11.1.1.1.1',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Take-away food, 1994-1997
restauration_a_emporter = dict(
value = '11.1.1.1.2',
categorie_fiscale = 'tva_taux_plein',
stop = 1997,
)
# Take-away food, 1998-2011
restauration_a_emporter_reforme_2010 = dict(
value = '11.1.1.1.2',
categorie_fiscale = 'tva_taux_reduit',
start = 1998,
stop = 2011,
)
# Take-away food, 2012-2014
restauration_a_emporter_reforme_2012 = dict(
value = '11.1.1.1.2',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Canteens, 1994
cantines = dict(
value = '11.1.2',
categorie_fiscale = '',
)
# Accommodation services, 2012-2014
service_hebergement = dict(
value = '11.2.1',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# 12 Other goods and services
autres_biens_et_services = dict(
value = 12,
categorie_fiscale = 'tva_taux_plein',
)
# Prostitution
prostitution = dict(
value = '12.2',
categorie_fiscale = '',
label = 'Prostitution',
origin = 'COICOP UN',
)
# Social protection. TODO: check tva_taux_plein before 2000
protection_sociale_reforme_2000 = dict(
value = '12.4',
categorie_fiscale = 'tva_taux_reduit',
start = 2000,
stop = 2011,
)
# Social protection
protection_sociale_reforme_2012 = dict(
value = '12.4',
categorie_fiscale = 'tva_taux_intermediaire',
start = 2012,
)
# Other insurance
autres_assurances = dict(
value = '12.5.5',
categorie_fiscale = 'autres_assurances',
label = 'Autres assurances',
origin = 'COICOP UN',
)
# Transport insurance
assurance_transports = dict(
value = '12.5.4',
categorie_fiscale = 'assurance_transport',
)
# Health insurance
assurance_maladie = dict(
value = '12.5.3',
categorie_fiscale = 'assurance_sante',
)
# Home insurance
assurance_habitation = dict(
value = '12.5.2',
categorie_fiscale = 'autres_assurances',
)
# Life insurance
assurance_vie = dict(
value = '12.5.1',
categorie_fiscale = 'autres_assurances',
label = 'Assurance vie',
origin = 'COICOP UN',
)
# Costs of financial intermediation services indirectly measured (FISIM)
intermediation_financiere = dict(
value = '12.6',
categorie_fiscale = '',
)
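# Each dictionary above describes one modification: the COICOP code(s) in
# `value`, the fiscal category to apply and, optionally, a start/stop year.
# The loop below applies them in order, so a code such as '07.3.1' is tagged
# 'tva_taux_reduit' up to 2011 and 'tva_taux_intermediaire' from 2012 onwards.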
for member in [
# 01
alimentation,
margarine, saindoux, confiserie,
# 02
alcools, vin, biere,
cigares, cigarettes, tabac_a_rouler, stupefiants,
# 03
habillement,
# 04
logement, eau_ordures_assainissement, eau_ordures_assainissement_reforme_2012, loyers,
# 05
ameublement, services_domestiques,
# 06
sante, pharmacie, parapharmacie, materiel_therapeutique,
# 07
transports,
transport_combine_passagers, transport_combine_passagers_reforme_2012,
transport_maritime, transport_maritime_reforme_2012,
transport_aerien, transport_aerien_reforme_2012,
transport_routier, transport_routier_reforme_2012,
transport_ferroviaire, transport_ferroviaire_reforme_2012,
carburants_lubrifiants,
# 08
communications, services_postaux,
# 09
loisirs_cuture, journaux_periodiques, livre, livre_reforme_2012, jeux_hasard,
services_culturels, services_culturels_reforme_2012,
services_recreatifs_sportifs, services_recreatifs_sportifs_reforme_2012,
# 10 Education
education,
# 11 Hotellerie restauration
hotellerie_restauration, cantines, service_hebergement,
consommation_boissons_alcoolisees,
restauration_sur_place, restauration_sur_place_reforme_2010, restauration_sur_place_reforme_2012,
restauration_a_emporter, restauration_a_emporter_reforme_2010, restauration_a_emporter_reforme_2012,
# 12
autres_biens_et_services,
protection_sociale_reforme_2000, protection_sociale_reforme_2012,
prostitution,
intermediation_financiere,
autres_assurances, assurance_transports, assurance_vie, assurance_maladie, assurance_habitation,
]:
coicop_nomenclature = apply_modification(coicop_nomenclature, **member)
coicop_legislation = coicop_nomenclature.copy()
if to_csv:
coicop_legislation.to_csv(
os.path.join(legislation_directory, 'coicop_legislation.csv'),
)
return coicop_legislation.copy()
def get_categorie_fiscale(value, year = None, assertion_error = True):
coicop_nomenclature = pd.read_csv(
os.path.join(legislation_directory, 'coicop_legislation.csv'),
converters = {'posteCOICOP': unicode}
)
build_coicop_nomenclature_with_fiscal_categories()
if isinstance(value, int):
value_str = '0' + str(value) if value < 10 else str(value)
selection = coicop_nomenclature.code_coicop.str[:2] == value_str
elif isinstance(value, str):
selection = coicop_nomenclature.code_coicop.str[:len(value)] == value
elif isinstance(value, list):
selection = coicop_nomenclature.code_coicop.isin(value)
if year is not None:
selection = selection & (coicop_nomenclature.start <= year) & (year <= coicop_nomenclature.stop)
categorie_fiscale = coicop_nomenclature.loc[selection, 'categorie_fiscale'].unique()
if assertion_error:
assert len(categorie_fiscale) == 1, 'The categorie fiscale is not unique. Candidates are: {}'.format(
categorie_fiscale)
return categorie_fiscale[0]
else:
return categorie_fiscale
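# Illustrative usage sketch, assuming coicop_legislation.csv has been built by
# build_coicop_nomenclature_with_fiscal_categories(to_csv = True); the expected
# categories follow from the dictionaries defined above:
#     get_categorie_fiscale('11.1.1.1.1', year = 2010)  # -> 'tva_taux_reduit'
#     get_categorie_fiscale('11.1.1.1.1', year = 2013)  # -> 'tva_taux_intermediaire'
#     get_categorie_fiscale(3)  # division 03 (clothing) -> 'tva_taux_plein'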
def test_coicop_legislation():
coicop_nomenclature = build_coicop_nomenclature_with_fiscal_categories(to_csv = True)
if coicop_nomenclature.categorie_fiscale.isnull().any():
return coicop_nomenclature.loc[coicop_nomenclature.categorie_fiscale.isnull()]
if __name__ == "__main__":
# extract_informations_from_coicop_to_categorie_fiscale()
coicop_nomenclature = build_coicop_nomenclature_with_fiscal_categories(to_csv = True)
# print test_coicop_legislation(coicop_nomenclature)
# TODO: create sub-categories for tobacco
# print get_categorie_fiscale('11.1.1.1.1', year = 2010)
| agpl-3.0 |
aabadie/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number of
states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/approx_mc_prediction.py | 1 | 2661 | import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
# NOTE: this is only policy evaluation, not optimization
# we'll try to obtain the same result as our other MC script
from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# found by policy_iteration_random on standard_grid
# MC method won't get exactly this, but should be close
# values:
# ---------------------------
# 0.43| 0.56| 0.72| 0.00|
# ---------------------------
# 0.33| 0.00| 0.21| 0.00|
# ---------------------------
# 0.25| 0.18| 0.11| -0.17|
# policy:
# ---------------------------
# R | R | R | |
# ---------------------------
# U | | U | |
# ---------------------------
# U | L | U | L |
policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'U',
(2, 1): 'L',
(2, 2): 'U',
(2, 3): 'L',
}
# initialize theta
# our model is V_hat = theta.dot(x)
# where x = [row, col, row*col, 1] - 1 for bias term
theta = np.random.randn(4) / 2
def s2x(s):
return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1])
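# For illustration: the feature vector for the bottom-left state (2, 0) is
# s2x((2, 0)) == np.array([1.0, -1.5, -3.0, 1.0]), i.e. a centered row/col
# encoding plus an interaction term and a bias.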
# repeat until convergence
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE/t
# generate an episode using pi
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set()
for s, G in states_and_returns:
# check if we have already seen s
# called "first-visit" MC policy evaluation
if s not in seen_states:
old_theta = theta.copy()
x = s2x(s)
V_hat = theta.dot(x)
# grad(V_hat) wrt theta = x
theta += alpha*(G - V_hat)*x
biggest_change = max(biggest_change, np.abs(old_theta - theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values
V = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
V[s] = theta.dot(s2x(s))
else:
# terminal state or state we can't otherwise get to
V[s] = 0
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/computation/eval.py | 7 | 10856 | #!/usr/bin/env python
"""Top level ``eval`` module.
"""
import warnings
import tokenize
from pandas.io.formats.printing import pprint_thing
from pandas.core.computation import _NUMEXPR_INSTALLED
from pandas.core.computation.expr import Expr, _parsers, tokenize_string
from pandas.core.computation.scope import _ensure_scope
from pandas.compat import string_types
from pandas.core.computation.engines import _engines
from pandas.util._validators import validate_bool_kwarg
def _check_engine(engine):
"""Make sure a valid engine is passed.
Parameters
----------
engine : str
Raises
------
KeyError
* If an invalid engine is passed
ImportError
* If numexpr was requested but doesn't exist
Returns
-------
string engine
"""
if engine is None:
if _NUMEXPR_INSTALLED:
engine = 'numexpr'
else:
engine = 'python'
if engine not in _engines:
raise KeyError('Invalid engine {0!r} passed, valid engines are'
' {1}'.format(engine, list(_engines.keys())))
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == 'numexpr':
if not _NUMEXPR_INSTALLED:
raise ImportError("'numexpr' is not installed or an "
"unsupported version. Cannot use "
"engine='numexpr' for query/eval "
"if 'numexpr' is not installed")
return engine
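# For example (illustrative, depends on the local installation):
# _check_engine(None) returns 'numexpr' when numexpr is importable and falls
# back to 'python' otherwise; an unknown engine name raises KeyError.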
def _check_parser(parser):
"""Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
if parser not in _parsers:
raise KeyError('Invalid parser {0!r} passed, valid parsers are'
' {1}'.format(parser, _parsers.keys()))
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, '__getitem__'):
name = type(resolver).__name__
raise TypeError('Resolver of type %r does not implement '
'the __getitem__ method' % name)
def _check_expression(expr):
"""Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
"""Convert an object to an expression.
Thus function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
s : unicode
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = pprint_thing(expr)
_check_expression(s)
return s
def _check_for_locals(expr, stack_level, parser):
at_top_of_stack = stack_level == 0
not_pandas_parser = parser != 'pandas'
if not_pandas_parser:
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = ("The '@' prefix is not allowed in "
"top-level eval calls, \nplease refer to "
"your variables by name without the '@' "
"prefix")
if at_top_of_stack or not_pandas_parser:
for toknum, tokval in tokenize_string(expr):
if toknum == tokenize.OP and tokval == '@':
raise SyntaxError(msg)
def eval(expr, parser='pandas', engine=None, truediv=True,
local_dict=None, global_dict=None, resolvers=(), level=0,
target=None, inplace=None):
"""Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
default of ``'pandas'`` parses code slightly different than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string or None, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~pandas.DataFrame.query` method to inject the
:attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : a target object for assignment, optional, default is None
essentially this is a passed in resolver
inplace : bool, default True
If expression mutates, whether to modify object inplace or return
copy with mutation.
WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Notes
-----
The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.eval
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
first_expr = True
if isinstance(expr, string_types):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != '']
else:
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError("multi-line expressions are only valid in the "
"context of data, use DataFrame.eval")
first_expr = True
for expr in exprs:
expr = _convert_expression(expr)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = _ensure_scope(level + 1, global_dict=global_dict,
local_dict=local_dict, resolvers=resolvers,
target=target)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None and multi_line:
raise ValueError("Multi-line expressions are only valid"
" if all expressions contain an assignment")
# assign if needed
if env.target is not None and parsed_expr.assigner is not None:
if inplace is None:
warnings.warn(
"eval expressions containing an assignment currently"
"default to operating inplace.\nThis will change in "
"a future version of pandas, use inplace=True to "
"avoid this warning.",
FutureWarning, stacklevel=3)
inplace = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
target = env.target.copy()
else:
target = env.target
target[parsed_expr.assigner] = ret
if not resolvers:
resolvers = ({parsed_expr.assigner: ret},)
else:
# the existing resolver needs to be updated to handle the
# case of mutating an existing column in a copy
for resolver in resolvers:
if parsed_expr.assigner in resolver:
resolver[parsed_expr.assigner] = ret
break
else:
resolvers += ({parsed_expr.assigner: ret},)
ret = None
first_expr = False
if not inplace and inplace is not None:
return target
return ret
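# Minimal usage sketch (assuming pandas and, optionally, numexpr are installed;
# the engine actually used depends on whether numexpr is available):
#     >>> import pandas as pd
#     >>> pd.eval('1 + 2 * 3')
#     7
#     >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#     >>> df.eval('c = a + b', inplace=True)  # DataFrame.eval builds on this function
#     >>> df['c'].tolist()
#     [4, 6]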
| mit |
winklerand/pandas | pandas/tests/sparse/test_arithmetics.py | 18 | 19342 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayArithmetics(object):
_base = np.array
_klass = pd.SparseArray
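# The checks below are written against _base/_klass so that the same tests can
# be reused for SparseSeries by overriding these two attributes (see
# TestSparseSeriesArithmetic near the end of this file).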
def _assert(self, a, b):
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore', divide='ignore'):
# Unfortunately, trying to wrap the computation of each expected
# value with np.errstate() is too tedious.
# sparse & sparse
self._assert((a + b).to_dense(), a_dense + b_dense)
self._assert((b + a).to_dense(), b_dense + a_dense)
self._assert((a - b).to_dense(), a_dense - b_dense)
self._assert((b - a).to_dense(), b_dense - a_dense)
self._assert((a * b).to_dense(), a_dense * b_dense)
self._assert((b * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b).to_dense(), a_dense // b_dense)
self._assert((b // a).to_dense(), b_dense // a_dense)
self._assert((a % b).to_dense(), a_dense % b_dense)
self._assert((b % a).to_dense(), b_dense % a_dense)
self._assert((a ** b).to_dense(), a_dense ** b_dense)
self._assert((b ** a).to_dense(), b_dense ** a_dense)
# sparse & dense
self._assert((a + b_dense).to_dense(), a_dense + b_dense)
self._assert((b_dense + a).to_dense(), b_dense + a_dense)
self._assert((a - b_dense).to_dense(), a_dense - b_dense)
self._assert((b_dense - a).to_dense(), b_dense - a_dense)
self._assert((a * b_dense).to_dense(), a_dense * b_dense)
self._assert((b_dense * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b_dense).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b_dense / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b_dense).to_dense(), a_dense // b_dense)
self._assert((b_dense // a).to_dense(), b_dense // a_dense)
self._assert((a % b_dense).to_dense(), a_dense % b_dense)
self._assert((b_dense % a).to_dense(), b_dense % a_dense)
self._assert((a ** b_dense).to_dense(), a_dense ** b_dense)
self._assert((b_dense ** a).to_dense(), b_dense ** a_dense)
def _check_bool_result(self, res):
assert isinstance(res, self._klass)
assert res.dtype == np.bool
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore'):
# Unfortunately, trying to wrap the computation of each expected
# value with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
def test_float_scalar(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
def test_float_scalar_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_same_index_comparison(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_different_kind(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind='integer')
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block', fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=1)
b = self._klass(rvalues, kind='block', fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self):
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
def test_int_array_comparison(self):
# int32 is not implemented at the moment
for dtype in ['int64']:
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_bool_same_index(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, True, True], dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_bool_array_logical(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, False, True, True],
dtype=np.bool)
rvalues = self._base([True, False, False, True, False, True],
dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self):
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
def test_mixed_array_comparison(self):
# int32 is not implemented at the moment
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
class TestSparseSeriesArithmetic(TestSparseArrayArithmetics):
_base = pd.Series
_klass = pd.SparseSeries
def _assert(self, a, b):
tm.assert_series_equal(a, b)
def test_alignment(self):
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[1, 2, 3, 4])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[10, 11, 12, 13])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
| bsd-3-clause |
plissonf/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate feature scores, used below to check that the best features are selected
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |