repo_name | path | copies | size | content | license
---|---|---|---|---|---|
loganlinn/mlia | resources/Ch10/kMeans.py | 3 | 6419 | '''
Created on Feb 16, 2011
k Means Clustering for Ch10 of Machine Learning in Action
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName): #general function to parse tab-delimited floats
dataMat = [] #assume last column is target value
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = map(float,curLine) #map all elements to float()
dataMat.append(fltLine)
return dataMat
def distEclud(vecA, vecB):
return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB)
def randCent(dataSet, k):
n = shape(dataSet)[1]
centroids = mat(zeros((k,n)))#create centroid mat
for j in range(n):#create random cluster centers, within bounds of each dimension
minJ = min(dataSet[:,j])
rangeJ = float(max(dataSet[:,j]) - minJ)
centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1))
return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))#create mat to assign data points
#to a centroid, also holds SE of each point
centroids = createCent(dataSet, k)
clusterChanged = True
while clusterChanged:
clusterChanged = False
for i in range(m):#for each data point assign it to the closest centroid
minDist = inf; minIndex = -1
for j in range(k):
distJI = distMeas(centroids[j,:],dataSet[i,:])
if distJI < minDist:
minDist = distJI; minIndex = j
if clusterAssment[i,0] != minIndex: clusterChanged = True
clusterAssment[i,:] = minIndex,minDist**2
print centroids
for cent in range(k):#recalculate centroids
ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]#get all the points in this cluster
centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean
return centroids, clusterAssment
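# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal example of how these helpers are usually combined, assuming a
# tab-delimited file of floats such as the book's 'testSet.txt' is available:
#     dataMat = mat(loadDataSet('testSet.txt'))
#     myCentroids, clustAssing = kMeans(dataMat, 4)
# clustAssing has one row per data point: column 0 holds the assigned cluster
# index and column 1 the squared distance to that centroid (its SSE term).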
def biKmeans(dataSet, k, distMeas=distEclud):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))
centroid0 = mean(dataSet, axis=0).tolist()[0]
centList =[centroid0] #create a list with one centroid
for j in range(m):#calc initial Error
clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
while (len(centList) < k):
lowestSSE = inf
for i in range(len(centList)):
ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
sseSplit = sum(splitClustAss[:,1])#compare the SSE to the current minimum
sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
print 'the bestCentToSplit is: ',bestCentToSplit
print 'the len of bestClustAss is: ', len(bestClustAss)
centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids
centList.append(bestNewCents[1,:].tolist()[0])
clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
return mat(centList), clusterAssment
import urllib
import json
def geoGrab(stAddress, city):
apiStem = 'http://where.yahooapis.com/geocode?' #create a dict and constants for the geocoder
params = {}
params['flags'] = 'J'#JSON return type
params['appid'] = 'aaa0VN6k'
params['location'] = '%s %s' % (stAddress, city)
url_params = urllib.urlencode(params)
yahooApi = apiStem + url_params #print url_params
print yahooApi
c=urllib.urlopen(yahooApi)
return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
fw = open('places.txt', 'w')
for line in open(fileName).readlines():
line = line.strip()
lineArr = line.split('\t')
retDict = geoGrab(lineArr[1], lineArr[2])
if retDict['ResultSet']['Error'] == 0:
lat = float(retDict['ResultSet']['Results'][0]['latitude'])
lng = float(retDict['ResultSet']['Results'][0]['longitude'])
print "%s\t%f\t%f" % (lineArr[0], lat, lng)
fw.write('%s\t%f\t%f\n' % (line, lat, lng))
else: print "error fetching"
sleep(1)
fw.close()
def distSLC(vecA, vecB):#Spherical Law of Cosines
a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180)
b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \
cos(pi * (vecB[0,0]-vecA[0,0]) /180)
return arccos(a + b)*6371.0 #pi is imported with numpy
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
datList = []
for line in open('places.txt').readlines():
lineArr = line.split('\t')
datList.append([float(lineArr[4]), float(lineArr[3])])
datMat = mat(datList)
myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
fig = plt.figure()
rect=[0.1,0.1,0.8,0.8]
scatterMarkers=['s', 'o', '^', '8', 'p', \
'd', 'v', 'h', '>', '<']
axprops = dict(xticks=[], yticks=[])
ax0=fig.add_axes(rect, label='ax0', **axprops)
imgP = plt.imread('Portland.png')
ax0.imshow(imgP)
ax1=fig.add_axes(rect, label='ax1', frameon=False)
for i in range(numClust):
ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
markerStyle = scatterMarkers[i % len(scatterMarkers)]
ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)
plt.show()
| epl-1.0 |
frrp/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
verificarlo/verificarlo | src/tools/ci/vfc_ci_report/inspect_runs.py | 1 | 24408 | #############################################################################
# #
# This file is part of Verificarlo. #
# #
# Copyright (c) 2015-2021 #
# Verificarlo contributors #
# Universite de Versailles St-Quentin-en-Yvelines #
# CMLA, Ecole Normale Superieure de Cachan #
# #
# Verificarlo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Verificarlo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Verificarlo. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
# Manage the view comparing the variables of a run
# Manage the view comparing a variable over different runs
# At its creation, an InspectRuns object will create all the needed Bokeh widgets
# and plots, setup the callback functions (either server side or client side),
# initialize widgets selection, and from this selection generate the first plots.
# Then, when callback functions are triggered, widgets selections are updated,
# and plots are re-generated with the newly selected data.
from math import pi
from functools import partial
import pandas as pd
import numpy as np
from bokeh.plotting import figure, curdoc
from bokeh.embed import components
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool,\
RadioButtonGroup, CheckboxGroup, CustomJS
import helper
import plot
##########################################################################
class InspectRuns:
# Helper functions related to InspectRun
def gen_runs_selection(self):
'''
Returns a dictionary mapping user-readable strings to all run timestamps
'''
runs_dict = {}
# Iterate over timestamp rows (runs) and fill dict
for row in self.metadata.iloc:
# The syntax used by pandas makes this part a bit tricky :
# row.name is the index of metadata (so it refers to the
# timestamp), whereas row["name"] is the column called "name"
# (which is the display string used for the run)
# runs_dict[run's name] = run's timestamp
runs_dict[row["name"]] = row.name
return runs_dict
def gen_boxplot_tooltips(self, prefix):
return [
("Name", "@%s_x" % prefix),
("Min", "@" + prefix + "_min{%0.18e}"),
("Max", "@" + prefix + "_max{%0.18e}"),
("1st quartile", "@" + prefix + "_quantile25{%0.18e}"),
("Median", "@" + prefix + "_quantile50{%0.18e}"),
("3rd quartile", "@" + prefix + "_quantile75{%0.18e}"),
("μ", "@" + prefix + "_mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
def gen_boxplot_tooltips_formatters(self, prefix):
return {
"@%s_min" % prefix: "printf",
"@%s_max" % prefix: "printf",
"@%s_quantile25" % prefix: "printf",
"@%s_quantile50" % prefix: "printf",
"@%s_quantile75" % prefix: "printf",
"@%s_mu" % prefix: "printf"
}
# Data processing helper
# (computes new distributions for sigma, s2, s10)
def data_processing(self, dataframe):
# Compute aggregated mu
dataframe["mu"] = np.vectorize(
np.average)(
dataframe["mu"],
weights=dataframe["nsamples"])
# nsamples is the number of aggregated elements (as well as the number
# of samples for our new sigma and s distributions)
dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
dataframe["mu_x"] = dataframe.index
# Make sure that strings don't exceed a certain length
dataframe["mu_x"] = dataframe["mu_x"].apply(
lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
)
# Get quantiles and mu for sigma, s10, s2
for prefix in ["sigma", "s10", "s2"]:
dataframe["%s_x" % prefix] = dataframe["mu_x"]
dataframe[prefix] = dataframe[prefix].apply(np.sort)
dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.25,))
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.50,))
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.75,))
dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
del dataframe[prefix]
return dataframe
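# Illustration (added, hedged): data_processing expects the grouped dataframe
# cells to hold Python lists, as produced by the .agg(...) call in
# update_plots() below. For a single toy row such as
#     pd.DataFrame({"mu": [[1.0, 3.0]], "nsamples": [[10, 30]],
#                   "sigma": [[0.1, 0.2]], "s10": [[5.0, 6.0]],
#                   "s2": [[15.0, 17.0]]}, index=["my_test"])
# the returned frame holds the nsamples-weighted average mu (here 2.5),
# nsamples collapsed to the list length (2), and min/quartile/max/mean
# columns replacing each of sigma, s10 and s2.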
# Plots update function
def update_plots(self):
groupby_display = self.widgets["groupby_radio"].labels[
self.widgets["groupby_radio"].active
]
groupby = self.factors_dict[groupby_display]
filterby_display = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby_display]
# Groupby and aggregate lines belonging to the same group in lists
groups = self.run_data[
self.run_data.index.isin(
[self.widgets["select_filter"].value],
level=filterby
)
].groupby(groupby)
groups = groups.agg({
"sigma": lambda x: x.tolist(),
"s10": lambda x: x.tolist(),
"s2": lambda x: x.tolist(),
"mu": lambda x: x.tolist(),
# Used for mu weighted average first, then will be replaced
"nsamples": lambda x: x.tolist()
})
# Compute the new distributions, ...
groups = self.data_processing(groups).to_dict("list")
# Update source
# Assign each ColumnDataSource, starting with the boxplots
for prefix in ["sigma", "s10", "s2"]:
dict = {
"%s_x" % prefix: groups["%s_x" % prefix],
"%s_min" % prefix: groups["%s_min" % prefix],
"%s_quantile25" % prefix: groups["%s_quantile25" % prefix],
"%s_quantile50" % prefix: groups["%s_quantile50" % prefix],
"%s_quantile75" % prefix: groups["%s_quantile75" % prefix],
"%s_max" % prefix: groups["%s_max" % prefix],
"%s_mu" % prefix: groups["%s_mu" % prefix],
"nsamples": groups["nsamples"]
}
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
# Boxplots will be filtered by max then min
top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
helper.remove_boxplot_outliers(dict, top_outliers, prefix)
bottom_outliers = helper.detect_outliers(
dict["%s_min" % prefix])
helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
self.sources["%s_source" % prefix].data = dict
# Finish with the mu plot
dict = {
"mu_x": groups["mu_x"],
"mu": groups["mu"],
"nsamples": groups["nsamples"]
}
self.sources["mu_source"].data = dict
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
mu_outliers = helper.detect_outliers(groups["mu"])
groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
groups["mu_x"] = helper.remove_outliers(
groups["mu_x"], mu_outliers)
# Update plots axis/titles
# Get display string of the last (unselected) factor
factors_dict = self.factors_dict.copy()
del factors_dict[groupby_display]
del factors_dict[filterby_display]
for_all = list(factors_dict.keys())[0]
# Update all display strings for plot title (remove caps, plural)
groupby_display = groupby_display.lower()
filterby_display = filterby_display.lower()[:-1]
for_all = for_all.lower()
self.plots["mu_inspect"].title.text = \
"Empirical average μ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["sigma_inspect"].title.text = \
"Standard deviation σ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["s10_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["s2_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
# Widgets' callback functions
# Run selector callback
def update_run(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Update run selection (by using dict mapping)
self.current_run = self.runs_dict[new]
# Update run data
self.run_data = self.data[self.data["timestamp"] == self.current_run]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Group by" radio
def update_groupby(self, attrname, old, new):
# Update "Filter by" radio list
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"].labels = filterby_list
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Filter by" radio
def update_filterby(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter selector options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# Filter selector callback
def update_filter(self, attrname, old, new):
self.update_plots()
# Filter outliers checkbox callback
def update_outliers_filtering(self, attrname, old, new):
# The status (checked/unchecked) of the checkbox is also verified inside
# self.update_plots(), so calling this function is enough
self.update_plots()
# Bokeh setup functions
# (for both variable and backend selection at once)
def setup_plots(self):
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Tooltips and formatters
dotplot_tooltips = [
("Name", "@mu_x"),
("μ", "@mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
dotplot_formatters = {
"@mu": "printf"
}
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"sigma")
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s10")
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s2")
# Plots
# Mu plot
self.plots["mu_inspect"] = figure(
name="mu_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_dotplot(
self.plots["mu_inspect"], self.sources["mu_source"], "mu",
tooltips=dotplot_tooltips,
tooltips_formatters=dotplot_formatters
)
self.doc.add_root(self.plots["mu_inspect"])
# Sigma plot
self.plots["sigma_inspect"] = figure(
name="sigma_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_boxplot(
self.plots["sigma_inspect"],
self.sources["sigma_source"],
prefix="sigma",
tooltips=sigma_boxplot_tooltips,
tooltips_formatters=sigma_boxplot_tooltips_formatters)
self.doc.add_root(self.plots["sigma_inspect"])
# s plots
self.plots["s10_inspect"] = figure(
name="s10_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s10_inspect"],
self.sources["s10_source"],
prefix="s10",
tooltips=s10_boxplot_tooltips,
tooltips_formatters=s10_boxplot_tooltips_formatters)
s10_tab_inspect = Panel(
child=self.plots["s10_inspect"],
title="Base 10")
self.plots["s2_inspect"] = figure(
name="s2_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2",
tooltips=s2_boxplot_tooltips,
tooltips_formatters=s2_boxplot_tooltips_formatters
)
s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2")
s_tabs_inspect = Tabs(
name="s_tabs_inspect",
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location="below"
)
self.doc.add_root(s_tabs_inspect)
def setup_widgets(self):
# Generation of selectable items
# Dict contains all inspectable runs (maps display strings to timestamps)
# The dict structure allows to get the timestamp from the display string
# in O(1)
self.runs_dict = self.gen_runs_selection()
# Dict maps display strings to column names for the different factors
# (var, backend, test)
self.factors_dict = {
"Variables": "variable",
"Backends": "vfc_backend",
"Tests": "test"
}
# Run selection
# Contains all options strings
runs_display = list(self.runs_dict.keys())
# Will be used when updating plots (contains actual number)
self.current_run = self.runs_dict[runs_display[-1]]
# Contains the selected option string, used to update current_n_runs
current_run_display = runs_display[-1]
# This contains only entries matching the run
self.run_data = self.data[self.data["timestamp"] == self.current_run]
change_run_callback_js = "updateRunMetadata(cb_obj.value);"
self.widgets["select_run"] = Select(
name="select_run", title="Run :",
value=current_run_display, options=runs_display
)
self.doc.add_root(self.widgets["select_run"])
self.widgets["select_run"].on_change("value", self.update_run)
self.widgets["select_run"].js_on_change("value", CustomJS(
code=change_run_callback_js,
args=(dict(
metadata=helper.metadata_to_dict(
helper.get_metadata(self.metadata, self.current_run)
)
))
))
# Factors selection
# "Group by" radio
self.widgets["groupby_radio"] = RadioButtonGroup(
name="groupby_radio",
labels=list(self.factors_dict.keys()), active=0
)
self.doc.add_root(self.widgets["groupby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["groupby_radio"].on_change(
"active",
self.update_groupby
)
# "Filter by" radio
# Get all possible factors, and remove the one selected in "Group by"
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"] = RadioButtonGroup(
name="filterby_radio",
labels=filterby_list, active=0
)
self.doc.add_root(self.widgets["filterby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["filterby_radio"].on_change(
"active",
self.update_filterby
)
# Filter selector
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"] = Select(
# We need a different name to avoid collision in the template with
# the runs comparison's widget
name="select_filter", title="Select a filter :",
value=options[0], options=options
)
self.doc.add_root(self.widgets["select_filter"])
self.widgets["select_filter"]\
.on_change("value", self.update_filter)
# Toggle for outliers filtering
self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
name="outliers_filtering_inspect",
labels=["Filter outliers"], active=[]
)
self.doc.add_root(self.widgets["outliers_filtering_inspect"])
self.widgets["outliers_filtering_inspect"]\
.on_change("active", self.update_outliers_filtering)
# Communication methods
# (to send/receive messages to/from master)
def change_repo(self, new_data, new_metadata):
'''
When received, update data and metadata with the new repo, and update
everything
'''
self.data = new_data
self.metadata = new_metadata
self.runs_dict = self.gen_runs_selection()
runs_display = list(self.runs_dict.keys())
current_run_display = runs_display[-1]
# Update widget (and trigger its callback)
self.widgets["select_run"].options = runs_display
self.widgets["select_run"].value = current_run_display
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
self.run_data = self.data[self.data["timestamp"] == self.current_run]
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
# Update widget (and trigger its callback)
self.widgets["select_filter"].options = options
self.widgets["select_filter"].value = options[0]
def switch_view(self, run_name):
'''When received, switch selected run to run_name'''
# This will trigger the widget's callback
self.widgets["select_run"].value = run_name
# Constructor
def __init__(self, master, doc, data, metadata):
'''
Here are the most important attributes of the InspectRuns class
master : reference to the ViewMaster class
doc : an object provided by Bokeh to add elements to the HTML document
data : pandas dataframe containing all the tests data
metadata : pandas dataframe containing all the tests metadata
sources : ColumnDataSource object provided by Bokeh, contains current
data for the plots (inside the .data attribute)
plots : dictionary of Bokeh plots
widgets : dictionary of Bokeh widgets
'''
self.master = master
self.doc = doc
self.data = data
self.metadata = metadata
self.sources = {
"mu_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}),
"s10_source": ColumnDataSource(data={}),
"s2_source": ColumnDataSource(data={})
}
self.plots = {}
self.widgets = {}
# Setup Bokeh objects
self.setup_plots()
self.setup_widgets()
# Pass the initial metadata to the template (will be updated in CustomJS
# callbacks). This is required because metadata is not displayed in a
# Bokeh widget, so we can't update this with a server callback.
initial_run = helper.get_metadata(self.metadata, self.current_run)
self.doc.template_variables["initial_timestamp"] = initial_run.name
self.doc.template_variables["initial_repo"] = initial_run.repo_name
# At this point, everything should have been initialized, so we can
# show the plots for the first time
self.update_plots()
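# Usage sketch (added, hedged): within the vfc_ci Bokeh server application the
# master view is expected to instantiate this class roughly as follows, where
# `view_master`, `data` and `metadata` are assumed to be built elsewhere:
#     inspect_runs = InspectRuns(master=view_master, doc=curdoc(),
#                                data=data, metadata=metadata)
#     inspect_runs.change_repo(new_data, new_metadata)   # e.g. on repo switch
#     inspect_runs.switch_view("run display name")       # jump to a given run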
| gpl-3.0 |
Barmaley-exe/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
elijah513/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
WhatWorksWhenForWhom/nlppln | nlppln/commands/save_ner_data.py | 1 | 1125 | #!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
from nlppln.utils import create_dirs, get_files
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
@click.option('--name', '-n', default='ner_stats.csv')
def nerstats(in_dir, out_dir, name):
create_dirs(out_dir)
frames = []
in_files = get_files(in_dir)
for fi in in_files:
with codecs.open(fi, encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [os.path.basename(fi)
for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(os.path.join(out_dir, name), encoding='utf-8')
if __name__ == '__main__':
nerstats()
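# Usage sketch (added, hedged): as a click command this script would typically
# be invoked on a directory of SAF JSON files, e.g.
#     python save_ner_data.py /path/to/saf_dir -o /path/to/out_dir -n ner_stats.csv
# which writes one CSV row per named-entity token (word, ner, w_id, text).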
| apache-2.0 |
tudarmstadt-lt/context-eval | twsi_upper_bound.py | 2 | 1796 | import twsi_eval
import argparse
from pandas import read_csv
from twsi_eval import TWSI_INVENTORY, map_sense_inventories, calculate_evaluation_scores
TWSI_DATASET = 'data/Dataset-TWSI-2.csv'
def evaluate_uppper_bound(twsi_dataset_fath, user2twsi):
print 'Estimating upper bound performance: ', twsi_dataset_fath
correct = 0
checked = set()
predictions = read_csv(twsi_dataset_fath, sep='\t', encoding='utf8')
i = -1
for i, row in predictions.iterrows():
context_id = row.context_id
gold_sense_ids = unicode(row.gold_sense_ids)
key = unicode(context_id) + row.target
if key not in checked:
checked.add(key)
if gold_sense_ids in user2twsi[row.target].values():
correct += 1
return correct, i+1
def main():
parser = argparse.ArgumentParser(description='Estimation of the upper bound performance given the custom Word Sense Inventory.')
parser.add_argument('user_inventory', metavar='sense-inventory', help='word sense inventory file, format:\n word_senseID <tab> list,of,words')
parser.add_argument('-predictions', help='word sense disambiguation predictions in the 9 column lexical sample format. By default use full Dataset-TWSI-2 set.', default=TWSI_DATASET)
args = parser.parse_args()
user2twsi = map_sense_inventories(TWSI_INVENTORY, args.user_inventory)
correct, count = evaluate_uppper_bound(args.predictions, user2twsi)
print "\nUpper Bound Results:"
print "Correct, retrieved, nr_sentences"
print correct, "\t", count
precision, recall, fscore = calculate_evaluation_scores(correct=correct, retrieved=correct, itemcount=count)
print "Precision:", precision, "\tRecall:", recall, "\tF1:", fscore
if __name__ == '__main__':
main()
| apache-2.0 |
jenfly/atmos-read | scripts/merra-replace-data.py | 1 | 5275 | """
Replace corrupted data files with daily data re-downloaded with wget
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import shutil
import xarray as xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = '/net/eady/data1/jwalker/datastore/merra2/wget/'
savedir = '/net/eady/data1/jwalker/datastore/merra2/merged/'
probdata = pd.read_csv('scripts/merra_urls/merge_data.csv', index_col=0)
# For each corrupted data file:
# - load the corrupted data file
# - load the new downloaded file for the problem day
# - calculate d/dp and other stuff
# - merge the data for the affected day
# - save into data file for the year
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False, squeeze=True)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def var_calcs(filenm, varnm, plev, latlon=(-90, 90, 40, 120)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
if varnm == 'DUDP':
nm, dp = 'U', True
elif varnm == 'DOMEGADP':
nm, dp = 'OMEGA', True
else:
nm, dp = varnm, False
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if dp:
print('Computing d/dp')
var = pgradient(var, lat1, lat2, lon1, lon2, plev)
else:
var = latlon_data(var, lat1, lat2, lon1, lon2, plev)
return var
def process_row(row, datadir, savedir):
filenm1 = row['filename']
year = row['year']
varnm = row['varnm']
plev = row['plev']
jday = row['jday']
filenm2 = datadir + row['datfile']
savefile1 = filenm1
savefile2 = savedir + os.path.split(filenm1)[1]
print('%d, %s, plev=%d' % (year, varnm, plev))
print('Reading original data from ' + filenm1)
with xray.open_dataset(filenm1) as ds:
var1 = ds[varnm].load()
print('Processing new data from ' + filenm2)
var2 = var_calcs(filenm2, varnm, plev)
print('Merging data for jday %d' % jday)
var = var1.copy()
ind = jday - 1
days = atm.get_coord(var1, 'day')
if not days[ind] == jday:
raise ValueError('Days not indexed from 1, need to edit code to handle')
var[ind] = var2
print('Saving to ' + savefile1)
var.to_netcdf(savefile1)
print('Saving to ' + savefile2)
var.to_netcdf(savefile2)
data = {'orig' : var1, 'new' : var2, 'merged' : var}
return data
# Make a copy of each of the original files -- only run this code once!
# for filenm in probdata['filename']:
# shutil.copyfile(filenm, filenm.replace('.nc', '_orig.nc'))
for i, row in probdata.iterrows():
data = process_row(row, datadir, savedir)
# Plot data to check
def plot_data(probdata, savedir, i):
row = probdata.iloc[i]
filenm = row['filename']
filenm = savedir + os.path.split(filenm)[1]
jday = row['jday']
varnm = row['varnm']
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
plt.figure(figsize=(16, 8))
plt.suptitle(os.path.split(filenm)[1])
plt.subplot(1, 3, 1)
atm.pcolor_latlon(var.sel(day=(jday-1)))
plt.title(jday - 1)
plt.subplot(1, 3, 2)
atm.pcolor_latlon(var.sel(day=jday))
plt.title(jday)
plt.subplot(1, 3, 3)
atm.pcolor_latlon(var.sel(day=(jday+1)))
plt.title(jday + 1) | mit |
ezietsman/msc-thesis | images/makeunflat2.py | 1 | 1059 | from pylab import *
import astronomy as ast
# to format the labels better
from matplotlib.ticker import FormatStrFormatter
fmt = FormatStrFormatter('%1.2g') # or whatever
X1 = load('ec2117ans_1_c.dat')
x1 = X1[:,0]
y1 = 10**(X1[:,2]/(-2.5))
y1 /= average(y1)
T0 = 2453964.3307097
P = 0.1545255
figure(figsize=(6,4))
subplots_adjust(hspace=0.6,left=0.16)
ax = subplot(211)
#plot(x1,y1,'.')
scatter((x1-T0)/P,y1,s=0.8,faceted=False)
xlabel('Orbital Phase')
ylabel('Intensity')
title('Original Lightcurve')
#ylim(min(y1)-0.0000005,max(y1)+0.0000005)
ax.yaxis.set_major_formatter(fmt)
ax = subplot(212)
x2,y2 = ast.signal.dft(x1,y1,0,7000,1)
plot(x2,y2,'k-')
xlabel('Frequency (cycles/day)')
ylabel('Amplitude')
#vlines(3560,0.000000025,0.00000003,color='k',linestyle='solid')
#vlines(950,0.000000025,0.00000003,color='k',linestyle='solid')
#text(3350,0.000000035,'DNO',fontsize=10)
#text(700,0.000000035,'lpDNO',fontsize=10)
xlim(0,7000)
ylim(0,0.004)
title('Periodogram')
#ax.yaxis.set_major_formatter(fmt)
savefig('unflattened.png')
show()
| mit |
Parallel-in-Time/pySDC | pySDC/playgrounds/Allen_Cahn/AllenCahn_contracting_circle_standard_integrators.py | 1 | 5930 | import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit, allencahn_semiimplicit
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_problem():
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = (128, 128)
problem_params['eps'] = 0.04
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-07
problem_params['lin_tol'] = 1E-08
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
return problem_params
def run_implicit_Euler(t0, dt, Tend):
"""
Routine to run the fully implicit Euler integrator
Args:
t0 (float): initial time
dt (float): time step size
Tend (float): end time
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
u_new = problem.solve_system(rhs=u, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_imex_Euler(t0, dt, Tend):
"""
Routine to run the IMEX Euler integrator
Args:
t0 (float): initial time
dt (float): time step size
Tend (float): end time
"""
problem = allencahn_semiimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=imex_mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
f = problem.eval_f(u, t)
rhs = u + dt * f.expl
u_new = problem.solve_system(rhs=rhs, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_CrankNicholson(t0, dt, Tend):
"""
Routine to run the Crank-Nicolson integrator
Args:
t0 (float): initial time
dt (float): time step size
Tend (float): end time
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0)/dt)
startt = time.time()
t = t0
for n in range(nsteps):
rhs = u + dt / 2 * problem.eval_f(u, t)
u_new = problem.solve_system(rhs=rhs, factor=dt / 2, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def compute_radius(u, dx, t, init_radius):
c = np.count_nonzero(u >= 0.0)
radius = np.sqrt(c / np.pi) * dx
exact_radius = np.sqrt(max(init_radius ** 2 - 2.0 * t, 0))
return radius, exact_radius
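# Sanity-check sketch (added, hedged): compute_radius counts the cells where
# u >= 0 and converts that area to an equivalent circle radius, while the
# exact radius follows the sharp-interface law r(t) = sqrt(r0**2 - 2*t).
# On a synthetic phase field over a unit domain this can be checked directly:
#     xv, yv = np.meshgrid(np.linspace(-0.5, 0.5, 128), np.linspace(-0.5, 0.5, 128))
#     u_synth = np.where(np.sqrt(xv**2 + yv**2) <= 0.25, 1.0, -1.0)
#     r, r_exact = compute_radius(u_synth, 1.0 / 128, 0.0, 0.25)
#     # both r and r_exact should be close to 0.25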
def plot_radius(xcoords, exact_radius, radii):
fig, ax = plt.subplots()
plt.plot(xcoords, exact_radius, color='k', linestyle='--', linewidth=1, label='exact')
for type, radius in radii.items():
plt.plot(xcoords, radius, linestyle='-', linewidth=2, label=type)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
fname = 'data/AC_contracting_circle_standard_integrators'
plt.savefig('{}.pdf'.format(fname), bbox_inches='tight')
# plt.show()
def main_radius(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# setup parameters "in time"
t0 = 0.0
dt = 0.001
Tend = 0.032
radii = {}
_, radius, exact_radius = run_implicit_Euler(t0=t0, dt=dt, Tend=Tend)
radii['implicit-Euler'] = radius
_, radius, exact_radius = run_imex_Euler(t0=t0, dt=dt, Tend=Tend)
radii['imex-Euler'] = radius
_, radius, exact_radius = run_CrankNicholson(t0=t0, dt=dt, Tend=Tend)
radii['CrankNicholson'] = radius
xcoords = [t0 + i * dt for i in range(int((Tend - t0) / dt))]
plot_radius(xcoords, exact_radius, radii)
def main_error(cwd=''):
t0 = 0
Tend = 0.032
errors = {}
# err, _, _ = run_implicit_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['implicit-Euler'] = err
# err, _, _ = run_imex_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['imex-Euler'] = err
err, _, _ = run_CrankNicholson(t0=t0, dt=0.001/64, Tend=Tend)
errors['CrankNicholson'] = err
if __name__ == "__main__":
main_error()
# main_radius()
| bsd-2-clause |
fspaolo/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 5 | 7058 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD 3 clause
import warnings
from tempfile import mkdtemp
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError, ward_tree, X.T, np.ones((4, 4)))
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
warnings.simplefilter("ignore", DeprecationWarning)
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = ward_tree(this_X.T,
n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
# test caching
clustering = Ward(n_clusters=10, connectivity=connectivity,
memory=mkdtemp())
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Turn caching off now
clustering = Ward(n_clusters=10, connectivity=connectivity)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = Ward(n_clusters=10,
connectivity=connectivity.todense())
assert_raises(TypeError, clustering.fit, X)
clustering = Ward(n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]))
assert_raises(ValueError, clustering.fit, X)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
assert_array_almost_equal(ward.transform(Xfull), Xred)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_popagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
    Check non-regression of a bug when a connectivity that does not support
    item assignment is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
with warnings.catch_warnings(record=True):
w.fit(x)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/Word2Vec/word2vec_basic.py | 8 | 8995 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
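# A rough illustration of what generate_batch produces (the tokens here are
# hypothetical, not taken from text8): with skip_window=1 and num_skips=2, a
# window such as ['the', 'quick', 'brown'] centred on 'quick' yields the two
# training pairs (input='quick', label='the') and (input='quick', label='brown'),
# i.e. the centre word is used to predict each of its context words once,
# in random order.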
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
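  # Informal sketch of why NCE is used here: instead of evaluating a softmax
  # over the full 50,000-word vocabulary, each step trains a binary classifier
  # to separate the true label word from num_sampled (64) randomly drawn noise
  # words, so only ~65 output rows are touched per example.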
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
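  # Since every row of normalized_embeddings has unit L2 norm, this matrix
  # product is exactly the cosine similarity: cos(u, v) = u.v / (|u| |v|)
  # reduces to a plain dot product when |u| = |v| = 1.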
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
INM-6/elephant | elephant/sta.py | 2 | 13537 | # -*- coding: utf-8 -*-
"""
Functions to calculate spike-triggered average and spike-field coherence of
analog signals.
.. autosummary::
:toctree: _toctree/sta
spike_triggered_average
spike_field_coherence
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
import numpy as np
import quantities as pq
import scipy.signal
from neo.core import AnalogSignal, SpikeTrain
from .conversion import BinnedSpikeTrain
__all__ = [
"spike_triggered_average",
"spike_field_coherence"
]
def spike_triggered_average(signal, spiketrains, window):
"""
Calculates the spike-triggered averages of analog signals in a time window
relative to the spike times of a corresponding spiketrain for multiple
signals each. The function receives n analog signals and either one or
    n spiketrains. If a single spiketrain is given, it is multiplied n-fold
    and used for each of the n analog signals.
Parameters
----------
signal : neo AnalogSignal object
'signal' contains n analog signals.
spiketrains : one SpikeTrain or one numpy ndarray or a list of n of either of these.
'spiketrains' contains the times of the spikes in the spiketrains.
window : tuple of 2 Quantity objects with dimensions of time.
'window' is the start time and the stop time, relative to a spike, of
the time interval for signal averaging.
If the window size is not a multiple of the sampling interval of the
signal the window will be extended to the next multiple.
Returns
-------
result_sta : neo AnalogSignal object
'result_sta' contains the spike-triggered averages of each of the
analog signals with respect to the spikes in the corresponding
spiketrains. The length of 'result_sta' is calculated as the number
of bins from the given start and stop time of the averaging interval
and the sampling rate of the analog signal. If for an analog signal
no spike was either given or all given spikes had to be ignored
because of a too large averaging interval, the corresponding returned
analog signal has all entries as nan. The number of used spikes and
unused spikes for each analog signal are returned as annotations to
the returned AnalogSignal object.
Examples
--------
>>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
... sampling_rate=10/ms)
>>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
... (-5 * ms, 10 * ms))
"""
# checking compatibility of data and data types
# window_starttime: time to specify the start time of the averaging
# interval relative to a spike
# window_stoptime: time to specify the stop time of the averaging
# interval relative to a spike
window_starttime, window_stoptime = window
if not (isinstance(window_starttime, pq.quantity.Quantity) and
window_starttime.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError("The start time of the window (window[0]) "
"must be a time quantity.")
if not (isinstance(window_stoptime, pq.quantity.Quantity) and
window_stoptime.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError("The stop time of the window (window[1]) "
"must be a time quantity.")
if window_stoptime <= window_starttime:
raise ValueError("The start time of the window (window[0]) must be "
"earlier than the stop time of the window (window[1]).")
# checks on signal
if not isinstance(signal, AnalogSignal):
raise TypeError(
"Signal must be an AnalogSignal, not %s." % type(signal))
if len(signal.shape) > 1:
# num_signals: number of analog signals
num_signals = signal.shape[1]
else:
raise ValueError("Empty analog signal, hence no averaging possible.")
if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
raise ValueError("The chosen time window is larger than the "
"time duration of the signal.")
# spiketrains type check
if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
spiketrains = [spiketrains]
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
"one of those, not %s." % type(spiketrains))
# multiplying spiketrain in case only a single spiketrain is given
if len(spiketrains) == 1 and num_signals != 1:
template = spiketrains[0]
spiketrains = []
for i in range(num_signals):
spiketrains.append(template)
# checking for matching numbers of signals and spiketrains
if num_signals != len(spiketrains):
raise ValueError(
"The number of signals and spiketrains has to be the same.")
# checking the times of signal and spiketrains
for i in range(num_signals):
if spiketrains[i].t_start < signal.t_start:
raise ValueError(
"The spiketrain indexed by %i starts earlier than "
"the analog signal." % i)
if spiketrains[i].t_stop > signal.t_stop:
raise ValueError(
"The spiketrain indexed by %i stops later than "
"the analog signal." % i)
# *** Main algorithm: ***
# window_bins: number of bins of the chosen averaging interval
window_bins = int(np.ceil(((window_stoptime - window_starttime) *
signal.sampling_rate).simplified))
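    # Example with hypothetical numbers: a window of (-5 ms, 10 ms) on a signal
    # sampled at 10 kHz gives ceil(15 ms * 10 kHz) = 150 bins.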
# result_sta: array containing finally the spike-triggered averaged signal
result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
sampling_rate=signal.sampling_rate, units=signal.units)
# setting of correct times of the spike-triggered average
# relative to the spike
result_sta.t_start = window_starttime
used_spikes = np.zeros(num_signals, dtype=int)
unused_spikes = np.zeros(num_signals, dtype=int)
total_used_spikes = 0
for i in range(num_signals):
# summing over all respective signal intervals around spiketimes
for spiketime in spiketrains[i]:
# checks for sufficient signal data around spiketime
if (spiketime + window_starttime >= signal.t_start and
spiketime + window_stoptime <= signal.t_stop):
# calculating the startbin in the analog signal of the
# averaging window for spike
startbin = int(np.floor(((spiketime + window_starttime -
signal.t_start) * signal.sampling_rate).simplified))
# adds the signal in selected interval relative to the spike
result_sta[:, i] += signal[
startbin: startbin + window_bins, i]
# counting of the used spikes
used_spikes[i] += 1
else:
# counting of the unused spikes
unused_spikes[i] += 1
# normalization
result_sta[:, i] = result_sta[:, i] / used_spikes[i]
total_used_spikes += used_spikes[i]
if total_used_spikes == 0:
warnings.warn(
"No spike at all was either found or used for averaging")
result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)
return result_sta
def spike_field_coherence(signal, spiketrain, **kwargs):
"""
    Calculates the spike-field coherence between an analog signal (or several
    signal traces) and a (binned) spike train.
    The current implementation makes use of scipy.signal.coherence(). Additional
    kwargs will be directly forwarded to scipy.signal.coherence(),
    except for the axis parameter and the sampling frequency, which will be
    extracted from the input signals.
The spike_field_coherence function receives an analog signal array and
either a binned spike train or a spike train containing the original spike
times. In case of original spike times the spike train is binned according
to the sampling rate of the analog signal array.
The AnalogSignal object can contain one or multiple signal traces. In case
of multiple signal traces, the spike field coherence is calculated
individually for each signal trace and the spike train.
Parameters
----------
signal : neo AnalogSignal object
'signal' contains n analog signals.
spiketrain : SpikeTrain or BinnedSpikeTrain
Single spike train to perform the analysis on. The bin_size of the
binned spike train must match the sampling_rate of signal.
**kwargs:
All kwargs are passed to `scipy.signal.coherence()`.
Returns
-------
coherence : complex Quantity array
contains the coherence values calculated for each analog signal trace
in combination with the spike train. The first dimension corresponds to
the frequency, the second to the number of the signal trace.
frequencies : Quantity array
contains the frequency values corresponding to the first dimension of
the 'coherence' array
Examples
--------
Plot the SFC between a regular spike train at 20 Hz, and two sinusoidal
time series at 20 Hz and 23 Hz, respectively.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from quantities import s, ms, mV, Hz, kHz
    >>> import quantities as pq
    >>> import neo, elephant
>>> t = pq.Quantity(range(10000),units='ms')
>>> f1, f2 = 20. * Hz, 23. * Hz
>>> signal = neo.AnalogSignal(np.array([
np.sin(f1 * 2. * np.pi * t.rescale(s)),
np.sin(f2 * 2. * np.pi * t.rescale(s))]).T,
units=pq.mV, sampling_rate=1. * kHz)
>>> spiketrain = neo.SpikeTrain(
range(t[0], t[-1], 50), units='ms',
t_start=t[0], t_stop=t[-1])
>>> sfc, freqs = elephant.sta.spike_field_coherence(
signal, spiketrain, window='boxcar')
>>> plt.plot(freqs, sfc[:,0])
>>> plt.plot(freqs, sfc[:,1])
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('SFC')
>>> plt.xlim((0, 60))
>>> plt.show()
"""
if not hasattr(scipy.signal, 'coherence'):
raise AttributeError('scipy.signal.coherence is not available. The sfc '
'function uses scipy.signal.coherence for '
'the coherence calculation. This function is '
'available for scipy version 0.16 or newer. '
                             'Please update your scipy version.')
# spiketrains type check
if not isinstance(spiketrain, (SpikeTrain, BinnedSpikeTrain)):
raise TypeError(
"spiketrain must be of type SpikeTrain or BinnedSpikeTrain, "
"not %s." % type(spiketrain))
# checks on analogsignal
if not isinstance(signal, AnalogSignal):
raise TypeError(
"Signal must be an AnalogSignal, not %s." % type(signal))
if len(signal.shape) > 1:
# num_signals: number of individual traces in the analog signal
num_signals = signal.shape[1]
elif len(signal.shape) == 1:
num_signals = 1
else:
raise ValueError("Empty analog signal.")
len_signals = signal.shape[0]
# bin spiketrain if necessary
if isinstance(spiketrain, SpikeTrain):
spiketrain = BinnedSpikeTrain(
spiketrain, bin_size=signal.sampling_period)
# check the start and stop times of signal and spike trains
if spiketrain.t_start < signal.t_start:
raise ValueError(
"The spiketrain starts earlier than the analog signal.")
if spiketrain.t_stop > signal.t_stop:
raise ValueError(
"The spiketrain stops later than the analog signal.")
# check equal time resolution for both signals
if spiketrain.bin_size != signal.sampling_period:
raise ValueError(
"The spiketrain and signal must have a "
"common sampling frequency / bin_size")
# calculate how many bins to add on the left of the binned spike train
delta_t = spiketrain.t_start - signal.t_start
if delta_t % spiketrain.bin_size == 0:
left_edge = int((delta_t / spiketrain.bin_size).magnitude)
else:
raise ValueError("Incompatible binning of spike train and LFP")
right_edge = int(left_edge + spiketrain.n_bins)
# duplicate spike trains
spiketrain_array = np.zeros((1, len_signals))
spiketrain_array[0, left_edge:right_edge] = spiketrain.to_array()
spiketrains_array = np.repeat(spiketrain_array, repeats=num_signals, axis=0).transpose()
# calculate coherence
frequencies, sfc = scipy.signal.coherence(
spiketrains_array, signal.magnitude,
fs=signal.sampling_rate.rescale('Hz').magnitude,
axis=0, **kwargs)
return (pq.Quantity(sfc, units=pq.dimensionless),
pq.Quantity(frequencies, units=pq.Hz))
| bsd-3-clause |
aflaxman/scikit-learn | benchmarks/bench_sgd_regression.py | 50 | 5569 | # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
max_iter = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
max_iter=max_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25, tol=1e-3)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("max_iter", max_iter)
print("- benchmarking A-SGD")
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
max_iter=max_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05, tol=1e-3,
average=(max_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
ThomasSweijen/TPF | examples/adaptiveintegrator/simple-scene-plot-NewtonIntegrator.py | 6 | 2027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=0.0,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### You want snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
	### whichever comes sooner. virtTimeLim attribute is unset, hence virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on the left y-axis (the None entry switches the following series to the right y-axis) and z_sph, v_sph (as green circles connected with a line) and z_sph_half, all as functions of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
caltech-chimera/pychimera | scripts/multiphot.py | 1 | 9783 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
Usage: python multiphot.py [options] image coords
Authors:
Navtej Saini, Lee Rosenthal
Organization:
Caltech, Pasadena, CA, USA
Version:
7 January 2016 0.1 Initial implementation
9 February 2016 0.2 User input for photometric zero point
28 July 2017 0.3 Allow processing of multiple stars.
--------------------------------------------------------------------------
"""
import os, sys
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def plotter(phot_data, nframes, exptime, outfile):
"""
Plot light curve.
Parameters
----------
phot_data : numpy array
Photometry array
nframes : int
Number of image cube frames
exptime : float
Kinetic or accumulation time
outfile : string
Name of the out png image
Returns
-------
None
"""
params = {'backend': 'ps',
'font.size': 10,
'axes.labelweight': 'medium',
'figure.dpi' : 300,
'savefig.dpi': 300,
'savefig.jpeg_quality': 100
}
plt.rcParams.update(params)
ts = np.linspace(0, nframes*exptime, nframes)
plt.figure(figsize=(6,4))
plt.title("Normalized Light Curve : %s" %phot_data[0]['DATETIME'].split('T')[0])
plt.xlabel("Time (secs)")
plt.ylabel("Normalized Flux")
plt.plot(ts, phot_data['FLUX_ADU']/np.mean(phot_data['FLUX_ADU']), "r-")
plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
return
def process(infile, coords, method, inner_radius, outer_radius, cen_method, window_size, output, zmag):
"""
Entry point function to process science image.
Parameters
----------
infile : string
Science image or list of science images
coords : string
Input text file with coordinates of stars
method : string
FWHM of the stelar psf in pixels
inner_radius : float
Sky background sigma
outer_radius : int
Inner sky annulus radius in pixels
cen_method : string
Centroid method
window_size : int
Centroid finding window size in pixels
output : string
Output file name
zmag : float
Photometric zero point
Returns
-------
None
"""
print "FASTPHOT: CHIMERA Fast Aperture Photometry Routine"
inner_radius = float(inner_radius)
outer_radius = float(outer_radius)
# Check if input is a string of FITS images or a text file with file names
if infile[0] == "@":
infile = infile[1:]
if not os.path.exists(infile):
print "REGISTER: Not able to locate file %s" %infile
image_cubes = []
with open(infile, "r") as fd:
for line in fd.readlines():
if len(line) > 1:
image_cubes.append(line.replace("\n", ""))
else:
image_cubes = infile.split(",")
# Number of images
ncubes = len(image_cubes)
pos = np.loadtxt(coords, ndmin = 2)
nstars = len(pos)
total_phot_data = []
for i in range(ncubes):
sci_file = image_cubes[i]
print " Processing science image %s" %sci_file
# Read FITS image and star coordinate
image = chimera.fitsread(sci_file)
# Instantiate an Aperphot object
ap = chimera.Aperphot(sci_file, coords)
# Set fwhmpsf, sigma, annulus, dannulus and zmag
ap.method = method
ap.inner_radius = inner_radius
ap.outer_radius = outer_radius
if zmag != "":
ap.zmag = float(zmag)
# Determine nominal aperture radius for photometry
if i == 0:
nom_aper = ap.cog(window_size, cen_method)
print " Nominal aperture radius : %4.1f pixels" %nom_aper
# Perform aperture photometry on all the frames
dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("MSKY", "f8"),("NSKY", "f8"),("AREA", "f8"),("FLUX_ADU", "f8"),("FLUX_ELEC", "f8"),("FERR", "f8"),("MAG", "f8")]
phot_data = np.zeros([nstars, ap.nframes], dtype = dtype)
for j in range(ap.nframes):
print " Processing frame number : %d" %(j+1)
objpos = chimera.recenter(image[j,:,:], pos, window_size, cen_method)
aperphot_data = ap.phot(image[j,:,:], objpos, nom_aper)
pos = np.copy(objpos)
phot_data[:,j]['DATETIME'] = ap.addtime(j * ap.kintime).isoformat()
phot_data[:,j]['XCEN'] = aperphot_data["xcenter_raw"]
phot_data[:,j]['YCEN'] = aperphot_data["ycenter_raw"]
phot_data[:,j]['MSKY'] = aperphot_data["msky"]
phot_data[:,j]['NSKY'] = aperphot_data["nsky"]
phot_data[:,j]['AREA'] = aperphot_data["area"]
phot_data[:,j]['FLUX_ADU'] = aperphot_data["flux"]
phot_data[:,j]['FLUX_ELEC'] = phot_data[:,j]['FLUX_ADU'] * ap.epadu
phot_data[:,j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[:,j]['FLUX_ELEC']/ap.exptime)
# Calculate error in flux - using the formula
# err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
            phot_data[:,j]['FERR'] = np.sqrt(phot_data[:,j]['FLUX_ELEC'] + phot_data[:,j]['AREA'] * (1 + phot_data[:,j]['AREA']/phot_data[:,j]['NSKY']) * (phot_data[:,j]['MSKY'] * ap.epadu + ap.readnoise**2))
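            # In the error formula above: flux -> FLUX_ELEC, npix -> AREA,
            # nsky -> NSKY, flux_sky -> MSKY, gain -> ap.epadu and
            # R -> ap.readnoise (the CCD read noise in electrons).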
total_phot_data.append(phot_data)
# Save photometry data in numpy binary format
print " Saving photometry data as numpy binary"
if output != "":
npy_outfile = output + ".npy"
else:
npy_outfile = sci_file.replace(".fits", ".phot.npy")
if os.path.exists(npy_outfile):
os.remove(npy_outfile)
#np.save(npy_outfile, phot_data)
# Plot first pass light curve
if plot_flag:
print " Plotting normalized light curve"
if output != "":
plt_outfile = output + ".png"
else:
plt_outfile = sci_file.replace(".fits", ".lc.png")
plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
# Convert the total_phot_data to array and reshape it
print ' Saving consolidated photometry data...'
total_phot_data_arr = np.concatenate(total_phot_data, axis=1)
# Save the array as npy file
if output != "":
np.save(output+"phot_total.npy", total_phot_data_arr)
else: np.save("phot_total.npy", total_phot_data_arr)
return
if __name__ == "__main__":
usage = "Usage: python %prog [options] sci_image coords"
description = "Description. Utility to perform fast aperture photometry in CHIMERA science images."
parser = OptionParser(usage = usage, version = "%prog 0.2", description = description)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default = False,
help = "print result messages to stdout"
)
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default = True,
help = "don't print result messages to stdout"
)
parser.add_option("-m", "--method", dest = "method",
action="store", metavar="METHOD", help = "Method to use for determining overlap between aperture and pixels (default is exact)",
default = "exact"
)
parser.add_option("-i", "--inner_radius", dest = "inner_radius",
action="store", metavar="INNER_RADIUS", help = "Inner radius of sky annlus in pixels (default is 14)",
default = 14
)
parser.add_option("-d", "--outer_radius", dest = "outer_radius",
action="store", metavar="OUTER_RADIUS", help = "Radius of sky annulus in pixels (default is 16)",
default = 16
)
parser.add_option("-c", "--cen_method", dest = "cen_method",
action="store", metavar="CEN_METHOD", help = "Centroid method (default is 2dg)",
default = "2dg"
)
parser.add_option("-w", "--window_size", dest = "window_size",
action="store", metavar="WINDOW_SIZE", help = "Window size for centroid (default is 35)",
default = 35
)
parser.add_option("-o", "--output", dest = "output",
action="store", metavar="OUTPUT", help = "Output file name",
default = ""
)
parser.add_option("-z", "--zmag", dest = "zmag",
action="store", metavar="ZMAG", help = "Photometric zeroo point",
default = ""
)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("FASTPHOT: Incorrect number of arguments")
# Check verbosity
if not options.verbose:
output = StringIO()
old_stdout = sys.stdout
sys.stdout = output
# Switch off warnings
warnings.filterwarnings('ignore')
process(args[0], args[1], options.method, options.inner_radius, options.outer_radius, options.cen_method, options.window_size, options.output, options.zmag)
# Reset verbosity
if not options.verbose:
sys.stdout = old_stdout
| mit |
btabibian/scikit-learn | examples/cluster/plot_cluster_comparison.py | 46 | 6620 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
has been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not so big that running times become too long
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
asteca/ASteCA | packages/best_fit/DEPRECATED/abcpmc_algor_DEPRECATED.py | 1 | 9822 |
import numpy as np
from scipy.optimize import differential_evolution as DE
import time as t
from .abcpmc import sampler, threshold
from ..synth_clust import synth_cluster
from . import likelihood
from .emcee_algor import varPars, closeSol, discreteParams, convergenceVals
def main(
lkl_method, e_max, err_lst, completeness, max_mag_syn,
fundam_params, obs_clust, theor_tracks, R_V, ext_coefs, st_dist_mass,
N_fc, cmpl_rnd, err_rnd, nwalkers_abc, nsteps_abc, nburn_abc,
priors_abc):
varIdxs, ndim, ranges = varPars(fundam_params)
def dist(synth_clust, obs_clust):
lkl = np.inf
if synth_clust:
lkl = likelihood.main(lkl_method, synth_clust, obs_clust)
return lkl
def postfn(model):
# Re-scale z and M
model_scale = [
model[0] / 100., model[1], model[2], model[3] * 10.,
model[4] * 1000., model[5]]
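        # Note on the scaling (assuming the parameter order z, log(age), E_BV,
        # dist_mod, mass, b_frac used elsewhere in this module): the sampler
        # explores z*100, dist_mod/10 and mass/1000 so that all dimensions have
        # comparable magnitudes for the Gaussian prior; the line above undoes
        # that scaling before checking the model against 'ranges'.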
check_ranges = [
r[0] <= p <= r[1] for p, r in zip(*[model_scale, ranges[varIdxs]])]
synth_clust = []
# If some parameter is outside of the given ranges, don't bother
# obtaining the proper model.
if all(check_ranges):
model_proper = closeSol(fundam_params, varIdxs, model_scale)
# Metallicity and age indexes to identify isochrone.
m_i = fundam_params[0].index(model_proper[0])
a_i = fundam_params[1].index(model_proper[1])
isochrone = theor_tracks[m_i][a_i]
# Generate synthetic cluster.
synth_clust = synth_cluster.main(
e_max, err_lst, completeness, max_mag_syn, st_dist_mass,
isochrone, R_V, ext_coefs, N_fc, cmpl_rnd, err_rnd,
model_proper)
return synth_clust
# TODO add these parameters to the input params file
alpha, init_eps = 95, None
N_conv, tol_conv = 50., 0.01
max_secs = 22. * 60. * 60.
# Break out when AF is low.
# af_low, af_min_steps = 0.001, .1
max_t_walker = 30.
# eps_stuck_perc, N_eps_stuck_max = .005, 100
# Start timing.
elapsed = 0.
available_secs = max(30, max_secs)
start_t = t.time()
abcsampler = sampler.Sampler(
N=nwalkers_abc, Y=obs_clust, postfn=postfn, dist=dist)
# Set proposal
# sampler.particle_proposal_cls = sampler.OLCMParticleProposal
if init_eps is None:
# Estimate initial threshold value using DE.
def lnprob(model):
synth_clust = postfn(model)
return dist(synth_clust, obs_clust)
# Scale parameters bounds.
bounds = [
ranges[0] * 100., ranges[1], ranges[2], ranges[3] / 10.,
ranges[4] / 1000., ranges[5]]
result = DE(lnprob, bounds, maxiter=20)
init_eps = 4. * result.fun
print(" Initial threshold value: {:.2f}".format(init_eps))
# old_eps = init_eps
# TODO pass type of threshold from params file
# eps = threshold.LinearEps(T, 5000, init_eps)
eps = threshold.ConstEps(nsteps_abc, init_eps)
# Stddev values as full range.
std = np.eye(ndim) * (ranges.max(axis=1) - ranges.min(axis=1))
# Means as middle points in ranges.
means = (ranges.max(axis=1) + ranges.min(axis=1)) / 2.
# Scale values.
std[0], means[0] = std[0] * 100, means[0] * 100
std[3], means[3] = std[3] / 10, means[3] / 10
std[4], means[4] = std[4] / 1000., means[4] / 1000.
# Gaussian prior.
print(means)
print(std)
prior = sampler.GaussianPrior(mu=means, sigma=std)
# # We'll track how the average autocorrelation time estimate changes
# tau_index, autocorr_vals = 0, np.empty(nsteps_abc)
# # This will be useful to testing convergence
# old_tau = np.inf
# Check for convergence every 2% of steps or 100, whichever value
# is lower.
# N_steps_conv = min(int(nsteps_abc * 0.02), 100)
map_sol_old, N_models, prob_mean = [[], np.inf], 0, []
# N_eps_stuck = 0
chains_nruns, maf_steps, map_lkl = [], [], []
milestones = list(range(5, 101, 5))
for pool in abcsampler.sample(prior, eps):
print(
pool.t, pool.eps, pool.ratio, np.min(pool.dists),
np.mean(pool.dists))
chains_nruns.append(pool.thetas)
maf = pool.ratio
maf_steps.append([pool.t, maf])
N_models += nwalkers_abc / maf
# reduce eps value
# old_eps = eps.eps
eps.eps = np.percentile(pool.dists, alpha)
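        # With alpha = 95, the threshold for the next generation is the 95th
        # percentile of the current pool's distances, so it typically shrinks
        # as the posterior approximation improves.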
# # Check if threshold is stuck.
# if abs(eps.eps - old_eps) < eps_stuck_perc * eps.eps:
# N_eps_stuck += 1
# else:
# N_eps_stuck = 0
# if N_eps_stuck > N_eps_stuck_max:
# print(" Threshold is stuck (runs={}).".format(pool.t + 1))
# break
# if maf < af_low and pool.t > int(af_min_steps * nsteps_abc):
# print(" AF<{} (runs={})".format(af_low, pool.t + 1))
# break
if t.time() - start_t > (max_t_walker * nwalkers_abc):
print(" Sampler is stuck (runs={})".format(pool.t + 1))
break
elapsed += t.time() - start_t
if elapsed >= available_secs:
print(" Time consumed (runs={})".format(pool.t + 1))
break
start_t = t.time()
# # Only check convergence every 'N_steps_conv' steps
# if (pool.t + 1) % N_steps_conv:
# continue
# # Compute the autocorrelation time so far. Using tol=0 means that
# # we'll always get an estimate even if it isn't trustworthy.
# try:
# tau = autocorr.integrated_time(np.array(chains_nruns), tol=0)
# autocorr_vals[tau_index] = np.nanmean(tau)
# tau_index += 1
# # Check convergence
# converged = np.all(tau * N_conv < (pool.t + 1))
# converged &= np.all(np.abs(old_tau - tau) / tau < tol_conv)
# if converged:
# print(" Convergence achieved (runs={}).".format(pool.t + 1))
# break
# old_tau = tau
# except FloatingPointError:
# pass
# Store MAP solution in this iteration.
prob_mean.append([pool.t, np.mean(pool.dists)])
idx_best = np.argmin(pool.dists)
# Update if a new optimal solution was found.
if pool.dists[idx_best] < map_sol_old[1]:
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [pars[0] / 100., pars[1], pars[2], pars[3] * 10.,
pars[4] * 1000., pars[5]]
map_sol_old = [
closeSol(fundam_params, varIdxs, pars),
pool.dists[idx_best]]
map_lkl.append([pool.t, map_sol_old[1]])
# Print progress.
percentage_complete = (100. * (pool.t + 1) / nsteps_abc)
if len(milestones) > 0 and percentage_complete >= milestones[0]:
map_sol, logprob = map_sol_old
print("{:>3}% ({:.3f}) LP={:.1f} ({:g}, {:g}, {:.3f}, {:.2f}"
", {:g}, {:.2f})".format(
milestones[0], maf, logprob, *map_sol) +
" [{:.0f} m/s]".format(N_models / elapsed))
milestones = milestones[1:]
runs = pool.t + 1
# Evolution of the mean autocorrelation time.
tau_autocorr = np.array([np.nan] * 10) # autocorr_vals[:tau_index]
tau_index = np.nan
N_steps_conv = runs
# Final MAP fit.
idx_best = np.argmin(pool.dists)
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [
pars[0] / 100., pars[1], pars[2], pars[3] * 10., pars[4] * 1000.,
pars[5]]
map_sol = closeSol(fundam_params, varIdxs, pars)
map_lkl_final = pool.dists[idx_best]
abcsampler.close()
# Shape: (runs, nwalkers, ndim)
chains_nruns = np.array(chains_nruns)
# De-scale parameters.
chains_nruns[:, :, 0] = chains_nruns[:, :, 0] / 100.
chains_nruns[:, :, 3] = chains_nruns[:, :, 3] * 10.
chains_nruns[:, :, 4] = chains_nruns[:, :, 4] * 1000.
# Burn-in range.
Nb = int(runs * nburn_abc)
# Burn-in. Shape: (ndim, nwalkers, runs)
pars_chains_bi = discreteParams(
fundam_params, varIdxs, chains_nruns[:Nb, :, :]).T
# Change values for the discrete parameters with the closest valid values.
chains_nruns = discreteParams(
fundam_params, varIdxs, chains_nruns[Nb:, :, :])
mcmc_trace = chains_nruns.reshape(-1, ndim).T
# import matplotlib.pyplot as plt
# import corner
# corner.corner(
# mcmc_trace.T, quantiles=[0.16, 0.5, 0.84], show_titles=True)
# # levels=(1 - np.exp(-0.5),))
# plt.savefig("corner.png", dpi=300)
# Convergence parameters.
acorr_t, max_at_c, min_at_c, geweke_z, emcee_acorf, mcmc_ess, minESS,\
mESS, mESS_epsilon = convergenceVals(
'abc', ndim, varIdxs, N_conv, chains_nruns, mcmc_trace)
# Store mean solution.
mean_sol = closeSol(fundam_params, varIdxs, np.mean(mcmc_trace, axis=1))
isoch_fit_params = {
'varIdxs': varIdxs, 'nsteps_abc': runs, 'mean_sol': mean_sol,
'nburn_abc': Nb, 'map_sol': map_sol, 'map_lkl': map_lkl,
'map_lkl_final': map_lkl_final, 'prob_mean': prob_mean,
'mcmc_elapsed': elapsed, 'mcmc_trace': mcmc_trace,
'pars_chains_bi': pars_chains_bi, 'pars_chains': chains_nruns.T,
'maf_steps': maf_steps, 'autocorr_time': acorr_t,
'max_at_c': max_at_c, 'min_at_c': min_at_c,
'minESS': minESS, 'mESS': mESS, 'mESS_epsilon': mESS_epsilon,
'emcee_acorf': emcee_acorf, 'geweke_z': geweke_z,
'mcmc_ess': mcmc_ess,
'N_steps_conv': N_steps_conv, 'N_conv': N_conv, 'tol_conv': tol_conv,
'tau_index': tau_index, 'tau_autocorr': tau_autocorr
}
return isoch_fit_params
| gpl-3.0 |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of k with gap statistic method
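# Rough outline of the gap statistic (Tibshirani et al. 2001) that the function
# below follows: for each k, Gap(k) = mean(log(W_ref,k)) - log(W_k), where W_k
# is the within-cluster dispersion (KMeans inertia) of the real data and
# W_ref,k that of uniformly sampled reference data sets. The smallest k with
# Gap(k) >= Gap(k+1) - s_{k+1} is taken as optimal, where s_k is the reference
# standard deviation scaled by sqrt(1 + 1/n_refs).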
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
	SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]  # 1.0/nrefs avoids Python 2 integer division
opt_k = None
diff = []
for i in xrange(len(gap)-1):
diff = (SD[i+1]-(gap[i+1]-gap[i]))
if diff>0:
			opt_k = i+1  # index i corresponds to k = i+1 in the xrange(1,20) sweep above
break
if opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clustering with optimal number of clusters
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 |
toobaz/pandas | pandas/tests/indexes/datetimes/test_timezones.py | 2 | 47130 | """
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas.util.testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect the internal values
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError):
with tm.assert_produces_warning(FutureWarning):
index.tz_localize(tz=tz, errors="raise")
with tm.assert_produces_warning(
FutureWarning, clear=FutureWarning, check_stacklevel=False
):
result = index.tz_localize(tz=tz, errors="coerce")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
# note: this test checks that a tz-naive index can be localized
# and de-localized successfully when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
# can't localize a tz-aware object
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
tm.assert_index_equal(reset, idx)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp)
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
with pytest.raises(Exception):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
if str(tz).startswith("dateutil"):
# fixed ambiguous behavior
# see GH#14621
assert times[-1] == Timestamp("2013-10-27 01:00:00+0100", tz=tz, freq="H")
else:
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
pytest.param(
"dateutil/US/Pacific",
"shift_backward",
"2019-03-10 01:00",
marks=pytest.mark.xfail(reason="GH 24329"),
),
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
# construction with a nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
with pytest.raises(ValueError):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_dti_tz_localize_errors_deprecation(self):
# GH 22644
tz = "Europe/Warsaw"
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
with pytest.raises(ValueError):
dti.tz_localize(tz, errors="foo")
# make sure errors='coerce' gets mapped correctly to nonexistent
result = dti.tz_localize(tz, errors="coerce")
expected = dti.tz_localize(tz, nonexistent="NaT")
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, "+07:00")
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")
dr2 = DatetimeIndex(list(dr), name="foo")
tm.assert_index_equal(dr, dr2)
assert dr.tz == dr2.tz
assert dr2.name == "foo"
def test_dti_construction_univalent(self):
rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern")
rng2 = DatetimeIndex(data=rng, tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_constructors(self, tzstr):
""" Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
def test_join_utc_convert(self, join_type):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng.tz_convert("US/Eastern")
right = rng.tz_convert("Europe/Berlin")
result = left.join(left[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz == left.tz
result = left.join(right[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == "UTC"
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_timetz_accessor(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_dti_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H")
assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T")
assert t4.tz_convert(tz="UTC").freq == t4.freq
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
freq = "15min"
start = pd.Timestamp("201710290100", tz=tz)
end = pd.Timestamp("201710290300", tz=tz)
index = pd.date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
"201710290115",
"201710290130",
"201710290145",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290300",
],
tz=tz,
freq=freq,
ambiguous=[
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
],
)
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
def test_date_range_localize(self):
rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
rng3 = date_range("3/11/2012 03:00", periods=15, freq="H")
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng, rng3)
# DST transition time
val = rng[0]
exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp("3/11/2012 01:00", tz="US/Eastern")
assert exp.hour == 1
assert rng[1] == exp
rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern")
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
utc_range = date_range("1/1/2000", periods=20, tz="UTC")
eastern_range = utc_range.tz_convert("US/Eastern")
berlin_range = utc_range.tz_convert("Europe/Berlin")
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_dti_intersection(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_dti_equals_with_tz(self):
left = date_range("1/1/2011", periods=100, freq="H", tz="utc")
right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern")
assert not left.equals(right)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_nat(self, tzstr):
idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_astype_asobject_tzinfos(self, tzstr):
# GH#1345
# dates around a dst transition
rng = date_range("2/13/2010", "5/6/2010", tz=tzstr)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_with_timezone_repr(self, tzstr):
rng = date_range("4/13/2010", "5/6/2010")
rng_eastern = rng.tz_localize(tzstr)
rng_repr = repr(rng_eastern)
assert "2010-04-13 00:00:00" in rng_repr
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_take_dont_lose_meta(self, tzstr):
rng = date_range("1/1/2000", periods=20, tz=tzstr)
result = rng.take(range(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_utc_box_timestamp_and_localize(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr(
rng_eastern[0].tzinfo
)
def test_dti_to_pydatetime(self):
dt = dateutil.parser.parse("2012-06-13T01:39:00Z")
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_dti_to_pydatetime_fixedtz(self):
dates = np.array(
[
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
)
dti = DatetimeIndex(dates)
result = dti.to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = dti._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")])
def test_with_tz(self, tz):
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=pd.offsets.Hour())
assert dr.tz is pytz.utc
# DateRange with naive datetimes
dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc)
dr = bdate_range("1/1/2005", "1/1/2009", tz=tz)
# normalized
central = dr.tz_convert(tz)
assert central.tz is tz
naive = central[0].to_pydatetime().replace(tzinfo=None)
comp = conversion.localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# compare vs a localized tz
naive = dr[0].to_pydatetime().replace(tzinfo=None)
comp = conversion.localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# datetimes with tzinfo set
dr = bdate_range(
datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 1, tzinfo=pytz.utc)
)
with pytest.raises(Exception):
bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_field_access_localize(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range(
"2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan"
)
expected = Index(np.arange(10, dtype=np.int64))
tm.assert_index_equal(dr.hour, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_convert_tz_aware_datetime_datetime(self, tz):
# GH#1581
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]
dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates]
result = DatetimeIndex(dates_aware)
assert timezones.tz_compare(result.tz, tz)
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is pytz.utc
def test_dti_union_aware(self):
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern")
result = rng.union(rng2)
expected = rng.astype("O").union(rng2.astype("O"))
tm.assert_index_equal(result, expected)
assert result[0].tz.zone == "US/Central"
assert result[-1].tz.zone == "US/Eastern"
def test_dti_union_mixed(self):
# GH 21671
rng = DatetimeIndex([pd.Timestamp("2011-01-01"), pd.NaT])
rng2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo")
result = rng.union(rng2)
expected = Index(
[
pd.Timestamp("2011-01-01"),
pd.NaT,
pd.Timestamp("2012-01-01", tz="Asia/Tokyo"),
pd.Timestamp("2012-01-02", tz="Asia/Tokyo"),
],
dtype=object,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)]
)
@pytest.mark.usefixtures("datetime_tz_utc")
def test_iteration_preserves_nanoseconds(self, tz):
# GH 19603
index = DatetimeIndex(
["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz
)
for i, ts in enumerate(index):
assert ts == index[i]
class TestDateRange:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
# it works!
dr.hour
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
assert (dr.hour == 0).all()
dr = date_range("2012-11-02", periods=10, tz=tzstr)
result = dr.hour
expected = Index([0] * 10)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range("1/1/2000", periods=10, tz=tzstr)
expected = date_range("1/1/2000", periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
assert stamp.hour == 5
rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
assert stamp == rng[1]
class TestToDatetime:
"""Tests for the to_datetime constructor with timezones"""
def test_to_datetime_utc(self):
arr = np.array([dateutil.parser.parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
| bsd-3-clause |
elkingtonmcb/h2o-2 | py/testdir_multi_jvm/test_GLM2grid_hastie.py | 9 | 2649 | import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting parse of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=csvFilename + ".hex", timeoutSecs=20)
y = "10"
    # NOTE: hastie has two values, -1 and 1, rather than the 0/1 encoding H2O expects for a two-class response.
kwargs = {
'response': y,
'max_iter': 10,
'n_folds': 2,
'lambda': '1e-8,1e-4,1e-3',
'alpha': '0,0.25,0.8',
}
start = time.time()
print "\nStarting GLMGrid of", csvFilename
glmGridResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLMGrid in", (time.time() - start), "secs (python)"
    # still get zero coeffs; best model is AUC = 0.5 with intercept only.
h2o_glm.simpleCheckGLMGrid(self,glmGridResult, allowZeroCoeff=True,**kwargs)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(2,java_heap_GB=5)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2grid_hastie(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=300)
fullPathname = h2i.find_folder_and_filename('home-0xdiag-datasets', csvPathname, returnFullPath=True)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_doit(self, filename2x, None, pathname2x, timeoutSecs=300)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
eusoubrasileiro/fatiando | fatiando/vis/mpl.py | 2 | 35331 | """
Wrappers for :mod:`matplotlib` functions to facilitate plotting grids,
2D objects, etc.
This module loads all functions from :mod:`matplotlib.pyplot`, adds new
functions and overwrites some others (like :func:`~fatiando.vis.mpl.contour`,
:func:`~fatiando.vis.mpl.pcolor`, etc).
**Grids**
* :func:`~fatiando.vis.mpl.contour`
* :func:`~fatiando.vis.mpl.contourf`
* :func:`~fatiando.vis.mpl.pcolor`
Grids are automatically reshaped and interpolated if desired or necessary.
**2D objects**
* :func:`~fatiando.vis.mpl.points`
* :func:`~fatiando.vis.mpl.paths`
* :func:`~fatiando.vis.mpl.square`
* :func:`~fatiando.vis.mpl.squaremesh`
* :func:`~fatiando.vis.mpl.polygon`
* :func:`~fatiando.vis.mpl.layers`
* :func:`~fatiando.vis.mpl.seismic_image`
* :func:`~fatiando.vis.mpl.seismic_wiggle`
**Interactive**
* :func:`~fatiando.vis.mpl.draw_polygon`
* :func:`~fatiando.vis.mpl.draw_layers`
* :func:`~fatiando.vis.mpl.pick_points`
**Basemap (map projections)**
* :func:`~fatiando.vis.mpl.basemap`
* :func:`~fatiando.vis.mpl.draw_geolines`
* :func:`~fatiando.vis.mpl.draw_countries`
* :func:`~fatiando.vis.mpl.draw_coastlines`
**Auxiliary**
* :func:`~fatiando.vis.mpl.set_area`
* :func:`~fatiando.vis.mpl.m2km`
----
"""
import numpy
from matplotlib import pyplot, widgets
# Quick hack so that the docs can build using the mocks for readthedocs
# Ideally we would log an error message saying that functions from pyplot
# were not imported
try:
from matplotlib.pyplot import *
except:
pass
import fatiando.gridder
# Dummy variable used to lazily import the basemap toolkit (slow)
Basemap = None
def draw_polygon(area, axes, style='-', marker='o', color='k', width=2,
alpha=0.5, xy2ne=False):
"""
Draw a polygon by clicking with the mouse.
INSTRUCTIONS:
* Left click to pick the edges of the polygon;
* Draw edges CLOCKWISE;
* Press 'e' to erase the last edge;
* Right click to close the polygon;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the polygon
* axes : matplotlib Axes
The figure to use for drawing the polygon.
        To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* style : str
Line style (as in matplotlib.pyplot.plot)
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* width : float
The line width (as in matplotlib.pyplot.plot)
* alpha : float
Transparency of the fill of the polygon. 0 for transparent, 1 for
opaque (fills the polygon once done drawing)
* xy2ne : True or False
If True, will exchange the x and y axis so that x points north.
Use this when drawing on a map viewed from above. If the y-axis of the
plot is supposed to be z (depth), then use ``xy2ne=False``.
Returns:
* edges : list of lists
List of ``[x, y]`` pairs with the edges of the polygon
"""
axes.set_title("Click to draw polygon. Right click when done.")
if xy2ne:
axes.set_xlim(area[2], area[3])
axes.set_ylim(area[0], area[1])
else:
axes.set_xlim(area[0], area[1])
axes.set_ylim(area[2], area[3])
# start with an empty line
line, = axes.plot([], [], marker=marker, linestyle=style, color=color,
linewidth=width)
tmpline, = axes.plot([], [], marker=marker, linestyle=style, color=color,
linewidth=width)
draw = axes.figure.canvas.draw
x = []
y = []
plotx = []
ploty = []
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def draw_guide(px, py):
if len(x) != 0:
tmpline.set_data([x[-1], px], [y[-1], py])
def move(event):
if event.inaxes != axes:
return 0
if picking[0]:
draw_guide(event.xdata, event.ydata)
draw()
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
x.append(event.xdata)
y.append(event.ydata)
plotx.append(event.xdata)
ploty.append(event.ydata)
        if (event.button == 3 or event.button == 2) and picking[0]:
if len(x) < 3:
axes.set_title("Need at least 3 points to make a polygon")
else:
picking[0] = False
axes.set_title("Done! You can close the window now.")
plotx.append(x[0])
ploty.append(y[0])
tmpline.set_data([], [])
axes.fill(plotx, ploty, color=color, alpha=alpha)
line.set_data(plotx, ploty)
draw()
def erase(event):
if event.key == 'e' and picking[0]:
x.pop()
y.pop()
plotx.pop()
ploty.pop()
line.set_data(plotx, ploty)
draw_guide(event.xdata, event.ydata)
draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
line.figure.canvas.mpl_connect('motion_notify_event', move)
pyplot.show()
if len(x) < 3:
raise ValueError("Need at least 3 points to make a polygon")
if xy2ne:
verts = numpy.transpose([y, x])
else:
verts = numpy.transpose([x, y])
return verts
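# A minimal usage sketch for draw_polygon, assuming an interactive matplotlib
# backend is available; the area bounds below are arbitrary placeholders.
def _sketch_draw_polygon():
    axes = pyplot.figure().add_subplot(1, 1, 1)
    # Left click to pick vertices (clockwise), right click to close the polygon
    return draw_polygon([0, 1000, 0, 1000], axes, xy2ne=True)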
def pick_points(area, axes, marker='o', color='k', size=8, xy2ne=False):
"""
Get the coordinates of points by clicking with the mouse.
INSTRUCTIONS:
* Left click to pick the points;
* Press 'e' to erase the last point picked;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the points
* axes : matplotlib Axes
The figure to use for drawing the polygon.
        To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* size : float
Marker size (as in matplotlib.pyplot.plot)
* xy2ne : True or False
If True, will exchange the x and y axis so that x points north.
Use this when drawing on a map viewed from above. If the y-axis of the
plot is supposed to be z (depth), then use ``xy2ne=False``.
Returns:
* points : list of lists
List of ``[x, y]`` coordinates of the points
"""
axes.set_title("Click to pick points. Close window when done.")
if xy2ne:
axes.set_xlim(area[2], area[3])
axes.set_ylim(area[0], area[1])
else:
axes.set_xlim(area[0], area[1])
axes.set_ylim(area[2], area[3])
# start with an empty set
line, = axes.plot([], [], marker=marker, color=color, markersize=size)
line.figure.canvas.draw()
x = []
y = []
plotx = []
ploty = []
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
x.append(event.xdata)
y.append(event.ydata)
plotx.append(event.xdata)
ploty.append(event.ydata)
line.set_color(color)
line.set_marker(marker)
line.set_markersize(size)
line.set_linestyle('')
line.set_data(plotx, ploty)
line.figure.canvas.draw()
def erase(event):
if event.key == 'e' and picking[0]:
x.pop()
y.pop()
plotx.pop()
ploty.pop()
line.set_data(plotx, ploty)
line.figure.canvas.draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
pyplot.show()
if xy2ne:
points = numpy.transpose([y, x])
else:
points = numpy.transpose([x, y])
return points
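# A minimal usage sketch for pick_points, assuming an interactive backend;
# the area bounds and marker style are arbitrary.
def _sketch_pick_points():
    axes = pyplot.figure().add_subplot(1, 1, 1)
    # Left click to pick points, press 'e' to erase the last one, close to finish
    return pick_points([0, 1000, 0, 1000], axes, marker='x', color='r')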
def draw_layers(area, axes, style='-', marker='o', color='k', width=2):
"""
Draw series of horizontal layers by clicking with the mouse.
The y-axis is assumed to be depth, the x-axis is the physical property of
each layer.
INSTRUCTIONS:
* Click to make a new layer;
* Press 'e' to erase the last layer;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the polygon
* axes : matplotlib Axes
The figure to use for drawing the polygon.
        To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* style : str
Line style (as in matplotlib.pyplot.plot)
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* width : float
The line width (as in matplotlib.pyplot.plot)
Returns:
* layers : list = [thickness, values]
* thickness : list
The thickness of each layer, in order of increasing depth
* values : list
The physical property value of each layer, in the same order
"""
axes.set_title("Click to set a layer. Close the window when done.")
axes.grid()
vmin, vmax, zmin, zmax = area
axes.set_xlim(vmin, vmax)
axes.set_ylim(zmax, zmin)
# start with an empty line
line, = axes.plot([], [], marker=marker, linestyle=style,
color=color, linewidth=width)
midv = 0.5 * (vmax + vmin)
# this is the line that moves around with the mouse
tmpline, = axes.plot([midv], [zmin], marker=marker, linestyle='--',
color=color, linewidth=width)
# Make a proxy for drawing
draw = axes.figure.canvas.draw
depths = [zmin]
values = []
plotv = []
plotz = []
tmpz = [zmin]
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def draw_guide(v, z):
if len(values) == 0:
tmpline.set_data([v, v], [tmpz[0], z])
else:
if z > tmpz[0]:
tmpline.set_data([values[-1], v, v], [tmpz[0], tmpz[0], z])
else:
tmpline.set_data([values[-1], v], [tmpz[0], tmpz[0]])
def move(event):
if event.inaxes != axes:
return 0
v, z = event.xdata, event.ydata
if picking[0]:
draw_guide(v, z)
draw()
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
v, z = event.xdata, event.ydata
if z > tmpz[0]:
depths.append(z)
values.append(v)
plotz.extend([tmpz[0], z])
plotv.extend([v, v])
tmpz[0] = z
line.set_data(plotv, plotz)
draw()
def erase(event):
if picking[0] and len(values) > 0 and event.key == 'e':
depths.pop()
values.pop()
tmpz[0] = depths[-1]
plotv.pop()
plotv.pop()
plotz.pop()
plotz.pop()
line.set_data(plotv, plotz)
draw_guide(event.xdata, event.ydata)
draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
line.figure.canvas.mpl_connect('motion_notify_event', move)
pyplot.show()
thickness = [depths[i + 1] - depths[i] for i in xrange(len(depths) - 1)]
return thickness, values
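# A minimal usage sketch for draw_layers, assuming an interactive backend.
# The x axis here is a generic physical property (0 to 5000) and depth goes
# down to 1000; both ranges are arbitrary.
def _sketch_draw_layers():
    axes = pyplot.figure().add_subplot(1, 1, 1)
    thickness, values = draw_layers([0, 5000, 0, 1000], axes)
    return thickness, values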
def draw_geolines(area, dlon, dlat, basemap, linewidth=1):
"""
Draw the parallels and meridians on a basemap plot.
Parameters:
* area : list
``[west, east, south, north]``, i.e., the area where the lines will
be plotted
* dlon, dlat : float
The spacing between the lines in the longitude and latitude directions,
respectively (in decimal degrees)
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
"""
west, east, south, north = area
meridians = basemap.drawmeridians(numpy.arange(west, east, dlon),
labels=[0, 0, 0, 1], linewidth=linewidth)
parallels = basemap.drawparallels(numpy.arange(south, north, dlat),
labels=[1, 0, 0, 0], linewidth=linewidth)
def draw_countries(basemap, linewidth=1, style='dashed'):
"""
Draw the country borders using the given basemap.
Parameters:
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
* style : str
The style of the lines. Can be: 'solid', 'dashed', 'dashdot' or
'dotted'
"""
lines = basemap.drawcountries(linewidth=linewidth)
lines.set_linestyles(style)
def draw_coastlines(basemap, linewidth=1, style='solid'):
"""
Draw the coastlines using the given basemap.
Parameters:
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
* style : str
The style of the lines. Can be: 'solid', 'dashed', 'dashdot' or
'dotted'
"""
lines = basemap.drawcoastlines(linewidth=linewidth)
lines.set_linestyles(style)
def basemap(area, projection, resolution='c'):
"""
Make a basemap to use when plotting with map projections.
Uses the matplotlib basemap toolkit.
Parameters:
* area : list
``[west, east, south, north]``, i.e., the area of the data that is
going to be plotted
* projection : str
The name of the projection you want to use. Choose from:
* 'ortho': Orthographic
* 'geos': Geostationary
* 'robin': Robinson
* 'cass': Cassini
* 'merc': Mercator
* 'poly': Polyconic
* 'lcc': Lambert Conformal
* 'stere': Stereographic
* resolution : str
The resolution for the coastlines. Can be 'c' for crude, 'l' for low,
'i' for intermediate, 'h' for high
Returns:
* basemap : mpl_toolkits.basemap.Basemap
The basemap
"""
    if projection not in ['ortho', 'geos', 'robin', 'cass', 'merc',
                          'poly', 'lcc', 'stere']:
        raise ValueError("Unsupported projection '%s'" % (projection))
global Basemap
if Basemap is None:
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
raise
west, east, south, north = area
lon_0 = 0.5 * (east + west)
lat_0 = 0.5 * (north + south)
if projection == 'ortho':
bm = Basemap(projection=projection, lon_0=lon_0, lat_0=lat_0,
resolution=resolution)
elif projection == 'geos' or projection == 'robin':
bm = Basemap(projection=projection, lon_0=lon_0, resolution=resolution)
elif (projection == 'cass' or
projection == 'poly'):
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, resolution=resolution)
elif projection == 'merc':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_ts=lat_0,
resolution=resolution)
elif projection == 'lcc':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, rsphere=(6378137.00, 6356752.3142),
lat_1=lat_0, resolution=resolution)
elif projection == 'stere':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, lat_ts=lat_0, resolution=resolution)
return bm
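# A minimal usage sketch combining basemap, draw_coastlines, draw_countries and
# draw_geolines, assuming the basemap toolkit is installed; the area (a chunk of
# South America) and the 10 degree line spacing are arbitrary choices.
def _sketch_basemap():
    area = [-60, -30, -30, 0]  # west, east, south, north
    bm = basemap(area, 'merc', resolution='l')
    draw_coastlines(bm)
    draw_countries(bm, style='dotted')
    draw_geolines(area, 10, 10, bm)
    return bm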
def m2km(axis=None):
"""
Convert the x and y tick labels from meters to kilometers.
Parameters:
* axis : matplotlib axis instance
The plot.
.. tip:: Use ``fatiando.vis.gca()`` to get the current axis. Or the value
returned by ``fatiando.vis.subplot`` or ``matplotlib.pyplot.subplot``.
"""
if axis is None:
axis = pyplot.gca()
axis.set_xticklabels(['%g' % (0.001 * l) for l in axis.get_xticks()])
axis.set_yticklabels(['%g' % (0.001 * l) for l in axis.get_yticks()])
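# A minimal usage sketch for m2km: plot something with coordinates in meters and
# relabel the ticks in kilometers; the coordinates below are arbitrary.
def _sketch_m2km():
    axis = pyplot.figure().add_subplot(1, 1, 1)
    axis.plot([0, 5000, 10000], [0, 2000, 4000], '-k')
    m2km(axis)
    return axis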
def set_area(area):
"""
    Set the area of a Matplotlib plot using xlim and ylim.
Parameters:
* area : list = [x1, x2, y1, y2]
Coordinates of the top right and bottom left corners of the area
"""
x1, x2, y1, y2 = area
pyplot.xlim(x1, x2)
pyplot.ylim(y1, y2)
def points(pts, style='.k', size=10, label=None, xy2ne=False):
"""
Plot a list of points.
Parameters:
* pts : list of lists
List of [x, y] pairs with the coordinates of the points
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* size : int
Size of the plotted points
* label : str
If not None, then the string that will show in the legend
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
    * axes : ``matplotlib.axes``
The axes element of the plot
"""
x, y = numpy.array(pts).T
if xy2ne:
x, y = y, x
kwargs = {}
if label is not None:
kwargs['label'] = label
return pyplot.plot(x, y, style, markersize=size, **kwargs)
def paths(pts1, pts2, style='-k', linewidth=1, label=None):
"""
Plot paths between the two sets of points.
Parameters:
* pts1 : list of lists
List of (x, y) pairs with the coordinates of the points
* pts2 : list of lists
List of (x, y) pairs with the coordinates of the points
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* linewidth : float
The width of the lines representing the paths
* label : str
If not None, then the string that will show in the legend
"""
kwargs = {'linewidth': linewidth}
if label is not None:
kwargs['label'] = label
for p1, p2 in zip(pts1, pts2):
pyplot.plot([p1[0], p2[0]], [p1[1], p2[1]], style, **kwargs)
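# A minimal usage sketch for points and paths; the coordinates are arbitrary.
def _sketch_points_and_paths():
    pts1 = [[0, 0], [10, 0], [20, 5]]
    pts2 = [[5, 10], [15, 10], [25, 15]]
    pyplot.figure()
    points(pts1, style='^r', size=12, label='sources')
    points(pts2, style='ob', size=8, label='receivers')
    # Draw a straight path between each pair of points
    paths(pts1, pts2, style='--k')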
def layers(thickness, values, style='-k', z0=0., linewidth=1, label=None,
**kwargs):
"""
    Plot a series of layers and the values associated with each layer.
Parameters:
* thickness : list
The thickness of each layer in order of increasing depth
* values : list
The value associated with each layer in order of increasing
depth
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* z0 : float
The depth of the top of the first layer
* linewidth : float
Line width
* label : str
        label associated with the plotted layers.
Returns:
    * axes : ``matplotlib.axes``
The axes element of the plot
"""
if len(thickness) != len(values):
raise ValueError("thickness and values must have same length")
nlayers = len(thickness)
interfaces = [z0 + sum(thickness[:i]) for i in xrange(nlayers + 1)]
ys = [interfaces[0]]
for y in interfaces[1:-1]:
ys.append(y)
ys.append(y)
ys.append(interfaces[-1])
xs = []
for x in values:
xs.append(x)
xs.append(x)
kwargs['linewidth'] = linewidth
if label is not None:
kwargs['label'] = label
plot, = pyplot.plot(xs, ys, style, **kwargs)
return plot
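# A minimal usage sketch for layers: three layers with made-up thicknesses (m)
# and property values (e.g. velocities); all numbers are arbitrary.
def _sketch_layers():
    pyplot.figure()
    plot = layers([100, 200, 300], [2000, 2500, 3200], style='-k', z0=0.)
    pyplot.ylim(700, 0)  # flip the y axis so depth increases downward
    return plot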
def square(area, style='-k', linewidth=1, fill=None, alpha=1., label=None,
xy2ne=False):
"""
Plot a square.
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the square
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* linewidth : float
Line width
* fill : str
A color string used to fill the square. If None, the square is not
filled
* alpha : float
Transparency of the fill (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* label : str
label associated with the square.
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
    * axes : ``matplotlib.axes``
The axes element of the plot
"""
x1, x2, y1, y2 = area
if xy2ne:
x1, x2, y1, y2 = y1, y2, x1, x2
xs = [x1, x1, x2, x2, x1]
ys = [y1, y2, y2, y1, y1]
kwargs = {'linewidth': linewidth}
if label is not None:
kwargs['label'] = label
plot, = pyplot.plot(xs, ys, style, **kwargs)
if fill is not None:
pyplot.fill(xs, ys, color=fill, alpha=alpha)
return plot
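# A minimal usage sketch for square and set_area; the area bounds are arbitrary.
def _sketch_square():
    pyplot.figure()
    plot = square([0, 1000, 0, 500], style='--r', fill='r', alpha=0.2,
                  label='target area')
    # Pad the plot limits around the square
    set_area([-100, 1100, -100, 600])
    return plot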
def squaremesh(mesh, prop, cmap=pyplot.cm.jet, vmin=None, vmax=None):
"""
Make a pseudo-color plot of a mesh of squares
Parameters:
* mesh : :class:`fatiando.mesher.SquareMesh` or compatible
The mesh (a compatible mesh must implement the methods ``get_xs`` and
``get_ys``)
* prop : str
The physical property of the squares to use as the color scale.
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* vmin, vmax : float
Saturation values of the colorbar.
Returns:
    * axes : ``matplotlib.axes``
The axes element of the plot
"""
if prop not in mesh.props:
raise ValueError("Can't plot because 'mesh' doesn't have property '%s'"
% (prop))
xs = mesh.get_xs()
ys = mesh.get_ys()
X, Y = numpy.meshgrid(xs, ys)
V = numpy.reshape(mesh.props[prop], mesh.shape)
plot = pyplot.pcolor(X, Y, V, cmap=cmap, vmin=vmin, vmax=vmax, picker=True)
pyplot.xlim(xs.min(), xs.max())
pyplot.ylim(ys.min(), ys.max())
return plot
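# A minimal usage sketch for squaremesh using a stand-in object; any mesh-like
# object exposing ``get_xs``, ``get_ys``, ``shape`` and ``props`` works here,
# normally a fatiando.mesher.SquareMesh. All numbers are arbitrary.
def _sketch_squaremesh():
    class _Mesh(object):
        shape = (2, 3)
        props = {'density': numpy.arange(6.0)}
        def get_xs(self):
            return numpy.array([0.0, 1.0, 2.0, 3.0])
        def get_ys(self):
            return numpy.array([0.0, 1.0, 2.0])
    pyplot.figure()
    return squaremesh(_Mesh(), 'density')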
def polygon(polygon, style='-k', linewidth=1, fill=None, alpha=1., label=None,
xy2ne=False, linealpha=1.):
"""
Plot a polygon.
Parameters:
* polygon : :class:`fatiando.mesher.Polygon`
The polygon
* style : str
Color and line style string (as in matplotlib.pyplot.plot)
* linewidth : float
Line width
* fill : str
A color string used to fill the polygon. If None, the polygon is not
filled
* alpha : float
Transparency of the fill (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* linealpha : float
Transparency of the line (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* label : str
String with the label identifying the polygon in the legend
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
* lines : matplotlib Line object
Line corresponding to the polygon plotted
"""
if xy2ne:
tmpx = [y for y in polygon.y]
tmpx.append(polygon.y[0])
tmpy = [x for x in polygon.x]
tmpy.append(polygon.x[0])
else:
tmpx = [x for x in polygon.x]
tmpx.append(polygon.x[0])
tmpy = [y for y in polygon.y]
tmpy.append(polygon.y[0])
kwargs = {'linewidth': linewidth, 'alpha': linealpha}
if label is not None:
kwargs['label'] = label
line, = pyplot.plot(tmpx, tmpy, style, **kwargs)
if fill is not None:
pyplot.fill(tmpx, tmpy, color=fill, alpha=alpha)
return line
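# A minimal usage sketch for polygon. Any object exposing ``x`` and ``y``
# sequences works here; normally this would be a fatiando.mesher.Polygon.
# The vertices below are arbitrary.
def _sketch_polygon():
    class _Poly(object):
        x = [0, 10, 10, 0]
        y = [0, 0, 5, 5]
    pyplot.figure()
    return polygon(_Poly(), style='-b', fill='gray', alpha=0.3)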
def contour(x, y, v, shape, levels, interp=False, extrapolate=False, color='k',
label=None, clabel=True, style='solid', linewidth=1.0,
basemap=None):
"""
Make a contour plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* levels : int or list
Number of contours to use or a list with the contour values.
* interp : True or False
        Whether or not to interpolate before trying to plot. If data is not on
regular grid, set to True!
* extrapolate : True or False
        Whether or not to extrapolate the data when interp=True
* color : str
Color of the contour lines.
* label : str
String with the label of the contour that would show in a legend.
* clabel : True or False
        Whether or not to print the numerical value of the contour lines
* style : str
The style of the contour lines. Can be ``'dashed'``, ``'solid'`` or
``'mixed'`` (solid lines for positive contours and dashed for negative)
* linewidth : float
Width of the contour lines
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
* levels : list
List with the values of the contour levels
"""
if style not in ['solid', 'dashed', 'mixed']:
raise ValueError("Invalid contour style %s" % (style))
if x.shape != y.shape != v.shape:
raise ValueError("Input arrays x, y, and v must have same shape!")
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
kwargs = dict(colors=color, picker=True)
if basemap is None:
ct_data = pyplot.contour(X, Y, V, levels, **kwargs)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
ct_data = basemap.contour(lon, lat, V, levels, **kwargs)
if clabel:
ct_data.clabel(fmt='%g')
if label is not None:
ct_data.collections[0].set_label(label)
if style != 'mixed':
for c in ct_data.collections:
c.set_linestyle(style)
for c in ct_data.collections:
c.set_linewidth(linewidth)
return ct_data.levels
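# Minimal usage sketch for ``contour`` on a synthetic regular grid. The
# flattened arrays follow the convention stated above: x varies first (inner
# loop), then y, which matches numpy.meshgrid followed by ravel().
def _contour_usage_sketch():
    ny, nx = 50, 60
    xcoords = numpy.linspace(0, 10, nx)
    ycoords = numpy.linspace(0, 5, ny)
    X, Y = numpy.meshgrid(xcoords, ycoords)
    V = numpy.sin(X) * numpy.cos(Y)
    used_levels = contour(X.ravel(), Y.ravel(), V.ravel(), (ny, nx), 10,
                          color='b', clabel=True)
    pyplot.show()
    return used_levels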
def contourf(x, y, v, shape, levels, interp=False, extrapolate=False,
vmin=None, vmax=None, cmap=pyplot.cm.jet, basemap=None):
"""
Make a filled contour plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* levels : int or list
Number of contours to use or a list with the contour values.
* interp : True or False
        Whether or not to interpolate before trying to plot. If data is not on
regular grid, set to True!
* extrapolate : True or False
        Whether or not to extrapolate the data when interp=True
* vmin, vmax
Saturation values of the colorbar. If provided, will overwrite what is
set by *levels*.
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
* levels : list
List with the values of the contour levels
"""
if x.shape != y.shape != v.shape:
raise ValueError("Input arrays x, y, and v must have same shape!")
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
kwargs = dict(vmin=vmin, vmax=vmax, cmap=cmap, picker=True)
if basemap is None:
ct_data = pyplot.contourf(X, Y, V, levels, **kwargs)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
ct_data = basemap.contourf(lon, lat, V, levels, **kwargs)
return ct_data.levels
def pcolor(x, y, v, shape, interp=False, extrapolate=False, cmap=pyplot.cm.jet,
vmin=None, vmax=None, basemap=None):
"""
Make a pseudo-color plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* interp : True or False
        Whether or not to interpolate before trying to plot. If data is not on
regular grid, set to True!
* extrapolate : True or False
        Whether or not to extrapolate the data when interp=True
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* vmin, vmax
Saturation values of the colorbar.
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
    * axes : ``matplotlib.axes``
The axes element of the plot
"""
if x.shape != y.shape != v.shape:
raise ValueError("Input arrays x, y, and v must have same shape!")
if vmin is None:
vmin = v.min()
if vmax is None:
vmax = v.max()
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
if basemap is None:
plot = pyplot.pcolor(X, Y, V, cmap=cmap, vmin=vmin, vmax=vmax,
picker=True)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
plot = basemap.pcolor(lon, lat, V, cmap=cmap, vmin=vmin, vmax=vmax,
picker=True)
return plot
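# Minimal usage sketch for ``pcolor`` with the same synthetic-grid convention
# as the contour sketch above; a colorbar is added for the color scale.
def _pcolor_usage_sketch():
    ny, nx = 40, 40
    xcoords = numpy.linspace(-5, 5, nx)
    ycoords = numpy.linspace(-5, 5, ny)
    X, Y = numpy.meshgrid(xcoords, ycoords)
    V = numpy.exp(-(X ** 2 + Y ** 2) / 10.)
    pcolor(X.ravel(), Y.ravel(), V.ravel(), (ny, nx))
    pyplot.colorbar()
    pyplot.show()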
def seismic_wiggle(section, dt=0.004, ranges=None, scale=1.,
color='k', normalize=False):
"""
Plot a seismic section (numpy 2D array matrix) as wiggles.
Parameters:
* section : 2D array
matrix of traces (first dimension time, second dimension traces)
* dt : float
sample rate in seconds (default 4 ms)
* ranges : (x1, x2)
min and max horizontal values (default trace number)
* scale : float
scale factor multiplied by the section values before plotting
    * color : str
        Color used to fill the positive lobes of the wiggle
    * normalize : True or False
        True to normalize all traces in the section using the global max/min;
        the data will then lie in the range (-0.5, 0.5), centered on zero
.. warning::
Slow for more than 200 traces, in this case decimate your
data or use ``seismic_image``.
"""
npts, ntraces = section.shape # time/traces
if ntraces < 1:
raise IndexError("Nothing to plot")
if npts < 1:
raise IndexError("Nothing to plot")
t = numpy.linspace(0, dt*npts, npts)
amp = 1. # normalization factor
gmin = 0. # global minimum
toffset = 0. # offset in time to make 0 centered
if normalize:
gmax = section.max()
gmin = section.min()
amp = (gmax-gmin)
toffset = 0.5
pyplot.ylim(max(t), 0)
if ranges is None:
ranges = (0, ntraces)
x0, x1 = ranges
# horizontal increment
dx = float((x1-x0)/ntraces)
pyplot.xlim(x0, x1)
for i, trace in enumerate(section.transpose()):
tr = (((trace-gmin)/amp)-toffset)*scale*dx
        x = x0+i*dx  # x position for this trace
pyplot.plot(x+tr, t, 'k')
pyplot.fill_betweenx(t, x+tr, x, tr > 0, color=color)
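# Minimal usage sketch for ``seismic_wiggle`` with random traces. The trace
# count is kept small because of the performance warning in the docstring.
def _seismic_wiggle_usage_sketch():
    npts, ntraces = 400, 20
    section = numpy.random.randn(npts, ntraces)
    seismic_wiggle(section, dt=0.004, normalize=True, color='b')
    pyplot.show()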
def seismic_image(section, dt=0.004, ranges=None, cmap=pyplot.cm.gray,
aspect=None, vmin=None, vmax=None):
"""
Plot a seismic section (numpy 2D array matrix) as an image.
Parameters:
* section : 2D array
matrix of traces (first dimension time, second dimension traces)
* dt : float
sample rate in seconds (default 4 ms)
* ranges : (x1, x2)
min and max horizontal values (default trace number)
* cmap : colormap
color map to be used. (see pyplot.cm module)
* aspect : float
matplotlib imshow aspect parameter, ratio between axes
* vmin, vmax : float
min and max values for imshow
"""
npts, maxtraces = section.shape # time/traces
if maxtraces < 1:
raise IndexError("Nothing to plot")
if npts < 1:
raise IndexError("Nothing to plot")
t = numpy.linspace(0, dt*npts, npts)
data = section
if ranges is None:
ranges = (0, maxtraces)
x0, x1 = ranges
    extent = (x0, x1, t[-1], t[0])
if aspect is None: # guarantee a rectangular picture
aspect = numpy.round((x1-x0)/numpy.max(t))
aspect -= aspect*0.2
pyplot.imshow(data, aspect=aspect, cmap=cmap, origin='upper',
extent=extent, vmin=vmin, vmax=vmax)
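# Minimal usage sketch for ``seismic_image``: the same kind of random section
# shown as an image, with vmin/vmax clipping the color scale symmetrically.
def _seismic_image_usage_sketch():
    npts, ntraces = 400, 200
    section = numpy.random.randn(npts, ntraces)
    seismic_image(section, dt=0.004, vmin=-2, vmax=2)
    pyplot.colorbar()
    pyplot.show()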
| bsd-3-clause |
mattsep/TDSE | src/animate.py | 1 | 1444 | import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# animation of the probability density of the wavefunction over the course
# of time
def probabilityDensity(x, t, V, psi):
# convert to the probability density
Nt = len(t)
rho = sp.real(sp.conjugate(psi)*psi)
# set the first frame properties and grab the line handles
fig, ax = plt.subplots()
line1, line2, line3, line4 = ax.plot(x, rho[:,1], 'k',
x, sp.real(psi[:,1]), 'b:',
x, sp.imag(psi[:,1]), 'r:',
x, V, 'm--',
linewidth=2.0)
ax.set_xlabel("Position")
ax.set_ylabel("Probability Density")
ax.set_ylim([-rho.max(), rho.max()])
ax.set_xlim([min(x), max(x)])
# the animation function, to be called repeatedly
def animate(i):
# set the new data each frame
line1.set_ydata(rho[:,i])
line2.set_ydata(sp.real(psi[:,i]))
line3.set_ydata(sp.imag(psi[:,i]))
return line1, line2, line3
# the initialization function, useful when blit=True
def init():
line1.set_ydata(sp.ma.array(x, mask=True))
line2.set_ydata(sp.ma.array(x, mask=True))
line3.set_ydata(sp.ma.array(x, mask=True))
return line1, line2, line3
# perform the animation
ani = animation.FuncAnimation(fig, animate, sp.arange(1,Nt),
init_func=init, interval=25, blit=True)
plt.show()
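# Minimal usage sketch: feed probabilityDensity() a synthetic moving Gaussian
# wave packet on a flat potential. The packet below is plausible-looking
# input for exercising the animation, not an exact Schrodinger solution.
def _demo_probability_density():
    x = sp.linspace(-20.0, 20.0, 400)
    t = sp.linspace(0.0, 5.0, 200)
    V = sp.zeros_like(x)                # free particle: flat potential
    X = x[:, None]                      # shape (len(x), 1)
    T = t[None, :]                      # shape (1, len(t))
    k0 = 2.0                            # nominal wave number / group speed
    psi = sp.exp(-(X - k0 * T) ** 2 / 4.0) * sp.exp(1j * k0 * X)
    probabilityDensity(x, t, V, psi)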
| gpl-3.0 |
jgoppert/pymola | test/xml_test.py | 1 | 4050 | #!/usr/bin/env python
"""
Test XML backend
"""
import os
import sys
import time
import unittest
import pymoca.parser as mo_parser
from pymoca.backends.xml import analysis, generator, sim_scipy
from pymoca.backends.xml import parser as xml_parser
# get matplotlib from analysis, since logic for plotting
# without display already handled there
from pymoca.backends.xml.analysis import plt
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(TEST_DIR, 'models')
GENERATED_DIR = os.path.join(TEST_DIR, 'generated')
class XmlTest(unittest.TestCase):
"""
Xml tests
"""
def setUp(self):
pass
def tearDown(self):
pass
@staticmethod
def flush():
sys.stdout.flush()
sys.stdout.flush()
time.sleep(0.01)
def test_noise(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'Noise.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'Noise')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'Noise.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_simple_circuit(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'SimpleCircuit.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'SimpleCircuit')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'SimpleCircuit.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'c', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_bouncing_ball(self):
# generate
with open(os.path.join(MODEL_DIR, 'BouncingBall.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
generator.generate(ast_tree, 'BouncingBall')
# parse
example_file = os.path.join(
MODEL_DIR, 'bouncing-ball.xml')
model = xml_parser.parse_file(example_file, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
model_ode.prop['x']['start'] = 1
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True})
# plot
analysis.plot(data, linewidth=0.5, marker='.', markersize=0.5)
plt.draw()
plt.pause(0.1)
plt.close()
# simulate in soft real-time
do_realtime = False
if do_realtime:
print('\nsoft-realtime simulation')
time_start = time.time()
def realtime_callback(t, x, y, m, p, c):
t_real = time.time() - time_start
lag = t_real - t
if abs(lag) > 0.1:
print("real: {:10f} > sim: {:10f}, lag: {:10f}".format(t_real, t, lag))
elif lag < 0:
time.sleep(-lag)
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True},
user_callback=realtime_callback)
# plt.gca().set_ylim(-2, 2)
self.flush()
| bsd-3-clause |
trankmichael/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
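# Minimal usage sketch (matplotlib imported locally, since this module does
# not depend on it): a linear chirp sweeping from 6 Hz down to 1 Hz over 10 s.
def _chirp_usage_sketch():
    import matplotlib.pyplot as plt
    t = np.linspace(0, 10, 5001)
    w = chirp(t, f0=6, t1=10, f1=1, method='linear')
    plt.plot(t, w)
    plt.title("Linear chirp, f(0)=6 Hz, f(10)=1 Hz")
    plt.xlabel("t (sec)")
    plt.show()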
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
    Calculate the phase used by `chirp` to generate its output.
    See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
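# Minimal usage sketch: a cubic frequency sweep
# f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2, evaluated over 10 seconds.
def _sweep_poly_usage_sketch():
    import matplotlib.pyplot as plt
    p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    t = np.linspace(0, 10, 5001)
    w = sweep_poly(t, p)
    plt.plot(t, w)
    plt.title("Sweep poly, f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2")
    plt.xlabel("t (sec)")
    plt.show()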
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
trachelr/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/fixes.py | 1 | 29568 | """Compatibility fixes for older versions of Python, NumPy and SciPy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from __future__ import division
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg, xrange
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in inspect.getargspec(filtfilt)[0]:
return filtfilt
return _filtfilt
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
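# Minimal usage sketch: whichever implementation _get_argrelmax() returns
# (scipy's argrelmax or the backport above), it yields the indices of strict
# local maxima as a tuple of arrays.
def _argrelmax_usage_sketch():
    data = np.array([1, 3, 2, 5, 4, 0, 6, 1])
    argrelmax = _get_argrelmax()
    peaks = argrelmax(data)
    # peaks[0] is array([1, 3, 6]): positions of the values 3, 5 and 6
    return peaks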
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
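# Minimal usage sketch: the `isclose` name bound above (numpy's own or the
# backport) follows numpy semantics, including the `equal_nan` flag.
def _isclose_usage_sketch():
    a = np.array([1e10, 1e-8, np.nan])
    b = np.array([1.00001e10, 1e-9, np.nan])
    close = isclose(a, b)                      # [True, True, False]
    close_nan = isclose(a, b, equal_nan=True)  # [True, True, True]
    return close, close_nan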
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
wongkaiweng/LTLMoP | src/lib/handlers/share/MotionControl/BugControllerHandler.py | 8 | 39771 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===================================================================
BugController.py - Bug Algorithm Motion Controller
===================================================================
This motion controller uses the Bug 2 algorithm developed by V. Lumelsky and A. Stepanov. The algorithm assumes the robot is a
point operating in the plane with a contact sensor or a zero-range sensor to
detect obstacles. It can also be applied when the robot has a finite-range (non-zero range) sensor.
"""
#import BugControllerHelper
from numpy import *
from __is_inside import is_inside
import Polygon,Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
import time, math
import sys,os
import matplotlib.pyplot as plt
import copy
from scipy.linalg import norm
from math import *
import random
import matplotlib.animation as animation
import thread
import threading
import lib.handlers.handlerTemplates as handlerTemplates
class BugControllerHandler(handlerTemplates.MotionControlHandler):
def __init__(self, executor, shared_data,robot_type):
"""
        Bug algorithm motion planning controller
robot_type (int): Which robot is used for execution. pioneer is 1, ODE is 2 (default=1)
"""
self.velocity_count_thres = 100;
self.velocity_count = 0;
#operate_system (int): Which operating system is used for execution. Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
# Information about the robot
## 1: Pioneer ; 2: 0DE
if robot_type not in [1,2]:
robot_type = 1
self.system = robot_type
#settings for Ubuntu or Mac plotting (only when you set operate_system = 1)
self.PLOT = False # plot with matplot
self.PLOT_M_LINE = False # plot m-line
self.PLOT_EXIT = False # plot exit point of a region
self.PLOT_OVERLAP = False # plot overlap area with the obstacle
#settings for Windows (only when you set operate_system = 2)
self.PLOT_WINDOWS = True
# Get references to handlers we'll need to communicate with
self.drive_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.DriveHandler)
self.pose_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.PoseHandler)
if self.system == 1:
self.robocomm = shared_data['robocomm']
# Get information about regions
self.proj = executor.proj
self.coordmap_map2lab = self.hsub.coordmap_map2lab
self.coordmap_lab2map = self.hsub.coordmap_lab2map
self.last_warning = 0
###################################
########used by Bug algorithm######
###################################
# PARAMETERS
#for plotting
self.time = time.clock()
self.time_thres = 1;
# Pioneer related parameters
self.PioneerWidthHalf = 0.20 #0.25 # (m) width of Pioneer #0.20
        self.PioneerLengthHalf = 0.25 #0.30 (m) length of Pioneer #0.25
# Real Robot polygon related parameters
self.boxRealVertical = self.PioneerLengthHalf*2
self.boxRealHorizontal = self.PioneerWidthHalf*2.5
self.boxRealVertical_shift = self.boxRealVertical/2
self.boxRealHorizontal_shift = self.boxRealHorizontal/2
# Pioneer Range related parameters
self.range = 2*self.PioneerLengthHalf+0.40 # (m) specify the range of the robot (when the normal circle range cannot detect obstacle) #0.85
self.obsRange = self.range*0.7 # (m) range that says the robot detects obstacles #0.25
self.shift = 0.20 # How far the range is shifted to ensure it is sensing region in front is bigger 0.20
self.boxVertical = self.obsRange*2 # box cutting from range of Pioneer
self.boxHorizontal = self.obsRange*2 # box cutting from range of Pioneer
self.boxVertical_shift = self.boxVertical + self.boxRealVertical/2*1.5 # vertical shifting of box
self.boxHorizontal_shift = self.boxHorizontal/2 # horizontal shifting of the box
        ## 2: ODE
self.factorODE = 30 # 30 works better than 50
if self.system == 2:
self.PioneerWidthHalf = self.PioneerWidthHalf*self.factorODE
self.PioneerLengthHalf = self.PioneerLengthHalf*self.factorODE
self.range = self.range*self.factorODE
self.obsRange = self.obsRange*self.factorODE
self.shift = self.shift*self.factorODE
self.boxVertical = self.boxVertical*self.factorODE
self.boxHorizontal = self.boxHorizontal*self.factorODE
self.boxVertical_shift = self.boxVertical_shift*self.factorODE
self.boxHorizontal_shift = self.boxHorizontal_shift*self.factorODE
self.boxRealVertical = self.boxRealVertical*self.factorODE
self.boxRealHorizontal = self.boxRealHorizontal*self.factorODE
self.boxRealVertical_shift = self.boxRealVertical_shift*self.factorODE
self.boxRealHorizontal_shift= self.boxRealHorizontal_shift*self.factorODE
self.map = {} # dictionary for all the regions
self.all = Polygon.Polygon() # Polygon with all the regions
self.map_work = Polygon.Polygon() # Polygon of the current region and next region considered
self.ogr = Polygon.Polygon() #Polygon built from occupancy grid data points
self.previous_current_reg = None # previous current region
self.currentRegionPoly = None # current region's polygon
self.nextRegionPoly = None # next region's polygon
self.overlap = None
self.q_g = [0,0] # goal point of the robot heading to
self.q_hit = [0,0] # location where the robot first detect an obstacle
self.boundary_following= False # tracking whether it is in boundary following mode
self.m_line = None # m-line polygon
self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for find the normal to the vector connecting a point on the obstacle to the robot
self.q_hit_count = 0
self.q_hit_Thres = 1000
self.prev_follow = [[],[]]
## Construct robot polygon (for checking overlap)
pose = self.pose_handler.getPose()
self.prev_pose = pose
self.robot = PolyShapes.Rectangle(self.boxHorizontal,self.boxVertical)
self.robot.shift(pose[0]-self.boxHorizontal_shift,pose[1]-self.boxVertical_shift)
self.robot = PolyShapes.Circle(self.obsRange,(pose[0],pose[1])) - self.robot
self.robot.rotate(pose[2]-pi/2,pose[0],pose[1])
self.robot.shift(self.shift*cos(pose[2]),self.shift*sin(pose[2]))
        #construct real robot polygon (to see if there is overlap with the path to the goal)
self.realRobot = PolyShapes.Rectangle(self.boxRealHorizontal,self.boxRealVertical )
self.realRobot.shift(pose[0]-self.boxRealHorizontal_shift,pose[1]-self.boxRealVertical_shift)
self.realRobot.rotate(pose[2]-pi/2,pose[0],pose[1])
        #constructing polygons of the different regions (holes being taken care of)
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
#construct a polygon that included all the regions
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
#setting for plotting
if self.operate_system == 1:
if self.PLOT or self.PLOT_OVERLAP == True:
self.original_figure = 1
plt.figure(self.original_figure)
if self.PLOT_EXIT or self.PLOT_M_LINE == True:
self.overlap_figure = 2
plt.figure(self.overlap_figure)
else:
if self.PLOT_WINDOWS == True:
# start using anmination to plot Pioneer
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
#Plot the robot on the map in figure 1
if self.PLOT == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
q_gBundle = [[],[]] # goal bundle
q_overlap = [[],[]] # overlapping points with robot range
pose = self.pose_handler.getPose() # Find our current configuration
#Plot the robot on the map in figure 1
if self.PLOT == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return False
# Check if Vicon has cut out
if math.isnan(pose[2]):
print "no vicon pose"
print "WARNING: No Vicon data! Pausing."
#self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part is run when the robot goes to a new region, otherwise, the original map will be used.
if not self.previous_current_reg == current_reg:
#print 'getting into bug alogorithm'
#clean up the previous self.map_work
self.map_work = Polygon.Polygon()
# NOTE: Information about region geometry can be found in self.proj.rfi.regions
# create polygon list for regions other than the current_reg and the next_reg
self.map_work += self.map[self.proj.rfi.regions[current_reg].name]
self.map_work += self.map[self.proj.rfi.regions[next_reg].name]
# building current polygon and destination polygon
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
#set to zero velocity before finding the tranFace
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
print "Current reg is " + str(self.proj.rfi.regions[current_reg].name.lower())
print "Next reg is "+ str(self.proj.rfi.regions[next_reg].name.lower())
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
q_gBundle = q_gBundle.transpose()
# Find the closest face to the current position
max_magsq = 1000000
for tf in q_gBundle:
magsq = (tf[0] - pose[0])**2 + (tf[1] - pose[1])**2
if magsq < max_magsq:
connection = 0
tf = tf+(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*2.1*self.PioneerLengthHalf
if not self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
tf = tf-(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*4.2*self.PioneerLengthHalf
if self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
connection = 1
else:
connection = 1
if connection == 1:
pt1 = tf
max_magsq = magsq
transFace = 1
self.q_g[0] = pt1[0]
self.q_g[1] = pt1[1]
else:
sample = False
while not sample:
self.q_g[0],self.q_g[1] = self.nextRegionPoly.sample(random.random)
robo = PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
if not bool(robo - self.nextRegionPoly):
sample = True
"""
# Push the goal point to somewhere inside the next region to ensure the robot will get there.(CHECK!!)
self.q_g = self.q_g+(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*3*self.PioneerLengthHalf
if not self.nextRegionPoly.isInside(self.q_g[0],self.q_g[1]):
self.q_g = self.q_g-(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*6*self.PioneerLengthHalf
"""
#plot exiting point
if self.PLOT_EXIT == True:
plt.figure(self.overlap_figure)
plt.clf()
plt.plot(q_gBundle[:,0],q_gBundle[:,1],'ko' )
plt.plot(self.q_g[0],self.q_g[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
##################################################
#######check whether obstacle is detected#########
##################################################
#Update pose,update self.robot, self.realRobot orientation
self.robot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.realRobot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.robot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.realRobot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.prev_pose = pose
############################
########### STEP 1##########
############################
        ##Check whether obsRange overlaps with an obstacle or the boundary (overlap returns the part of the robot range not covered by the region)
# for real Pioneer robot
if self.system == 1:
# motion controller is not in boundary following mode
if self.boundary_following == False:
if self.robocomm.getReceiveObs() == False:
overlap = self.robot - ( self.map_work)
else:
overlap = self.robot - ( self.map_work - self.robocomm.getObsPoly())
else: #TRUE
# use a robot with full range all around it
Robot = PolyShapes.Circle(self.obsRange,(pose[0],pose[1]))
Robot.shift(self.shift*cos(pose[2]),self.shift*sin(pose[2]))
if self.robocomm.getReceiveObs() == False:
overlap = Robot - ( self.map_work)
else:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
# for ODE
else:
if self.boundary_following == False:
overlap = self.robot - (self.map_work)
else:#TRUE
overlap = self.robot - (self.map_work)
if self.boundary_following == False:
if bool(overlap): ## overlap of obstacles
#print "There MAYBE overlap~~ check connection to goal"
# check whether the real robot or and path to goal overlap with the obstacle
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if bool(pathOverlap): # there is overlapping, go into bounding following mode
#print "There IS overlap"
self.q_hit = mat([pose[0],pose[1]]).T
self.boundary_following = True
#Generate m-line polygon
QHitPoly = PolyShapes.Circle(self.PioneerLengthHalf/4,(pose[0],pose[1]))
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf/4,(self.q_g[0],self.q_g[1]))
self.m_line = PolyUtils.convexHull(QHitPoly + QGoalPoly)
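                    # The m-line approximates the straight segment from the hit point q_hit to
                    # the goal q_g (convex hull of two small circles); boundary following is
                    # left when this line is re-crossed closer to the goal (checked further below).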
#plot the first overlap
if self.PLOT_M_LINE == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(QHitPoly,'k')
self.plotPoly(QGoalPoly,'k')
self.plotPoly(overlap,'g')
self.plotPoly(self.m_line,'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 2-ODE true"
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 1-ODE true"
if self.boundary_following == True:
self.q_hit_count += 1
# finding the point to go normal to (closest overlapping point)
j = 0
recheck = 0
while not bool(overlap):
# cannot see the obstacle. Based on the location of the previous point blow up the range of the robot on the left or on the right
j += 1
# finding whether the previous obstacle point is on the left side or the right side of the robot
# angle = angle of the previous point from the x-axis of the field
# omega = angle of the current Pioneer orientation from the x-axis of the field
                # cc = difference between angle and omega ( < pi = previous point on the left of robot, else on the right of robot)
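                # (math.atan2(y, x) would give the same quadrant-correct angle directly;
                # the explicit case split below mirrors the original logic)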
x = self.prev_follow[0] -pose[0]
y = self.prev_follow[1] -pose[1]
angle = atan(y/x)
# convert angle to 2pi
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
# convert pose to 2pi
if pose[2] < 0:
omega = (2*pi + pose[2])
else:
omega = pose[2]
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# on the left
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print "on the left, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0]-self.range*j*2,pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
# on the right
else:
#print "on the right, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0],pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
if self.system == 1:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
else:
overlap = Robot - ( self.map_work)
#self.plotPoly(Robot, 'm',2)
                # obstacle assumed to be dynamic (already gone), so the robot can go straight to the goal point
if j >= 2:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
overlap = None
self.overlap = overlap
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
self.drive_handler.setVelocity(vx,vy, pose[2])
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
return arrived
##extra box plotting in figure 1#
if self.PLOT_OVERLAP == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
self.plotPoly(overlap,'g',3)
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
# find the closest point on the obstacle to the robot
overlap_len = len(overlap)
for j in range(overlap_len):
BoundPolyPoints = asarray(overlap[j])
for i in range(len(BoundPolyPoints)-1):
bundle_x = (BoundPolyPoints[i,0] +BoundPolyPoints[1+i,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[i,1] +BoundPolyPoints[1+i,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
bundle_x = (BoundPolyPoints[len(BoundPolyPoints)-1,0] +BoundPolyPoints[0,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[len(BoundPolyPoints)-1,1] +BoundPolyPoints[0,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
q_overlap = q_overlap.transpose()
pt = self.closest_pt([pose[0],pose[1]], vstack((q_overlap,asarray(PolyUtils.pointList(overlap)))))
self.prev_follow = pt
#calculate the vector to follow the obstacle
normal = mat([pose[0],pose[1]] - pt)
#find the distance from the closest point
distance = norm(normal)
velocity = normal * self.trans_matrix
vx = (velocity/norm(velocity)/3)[0,0]
vy = (velocity/norm(velocity)/3)[0,1]
# push or pull the robot towards the obstacle depending on whether the robot is close or far from the obstacle.
turn = pi/4*(distance-0.5*self.obsRange)/(self.obsRange) ### change to 0.6 from 0.5 for more allowance in following
corr_matrix = mat([[cos(turn),-sin(turn)],[sin(turn),cos(turn)]])
v = corr_matrix*mat([[vx],[vy]])
vx = v[0,0]
vy = v[1,0]
##plotting overlap on figure 2
if self.PLOT_OVERLAP == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(self.m_line,'b');
self.plotPoly(overlap,'r');
plt.plot(pt[0],pt[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
## conditions that the loop will end
#for 11111
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ####0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
#for 33333
reachMLine= self.m_line.overlaps(RobotPoly)
# 1.reached the next region
if arrived:
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
print "arriving at the next region. Exit boundary following mode"
vx = 0
vy = 0
"""
# 2.q_hit is reencountered
elif norm(self.q_hit-mat([pose[0],pose[1]]).T) < 0.05 and self.q_hit_count > self.q_hit_Thres:
print "reencounter q_hit. cannot reach q_goal"
vx = 0
vy = 0
"""
# 3.m-line reencountered
elif reachMLine:
#print >>sys.__stdout__, "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
if norm(self.q_g-mat([pose[0],pose[1]]).T) < norm(self.q_g-self.q_hit)-2*self.obsRange:
#print "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
#print "leaving boundary following mode"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
leaving = False
# turn the robot till it is facing the goal
while not leaving:
x = self.q_g[0] -self.pose_handler.getPose()[0]
y = self.q_g[1] -self.pose_handler.getPose()[1]
angle = atan(y/x)
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
if self.pose_handler.getPose()[2] < 0:
omega = (2*pi + self.pose_handler.getPose()[2])
#print >>sys.__stdout__,"omega<0: "+ str(omega)
else:
omega = self.pose_handler.getPose()[2]
#print >>sys.__stdout__,"omega: "+ str(omega)
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# angle(goal point orientation) on the left of omega(robot orientation)
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print>>sys.__stdout__, "turn left"
vx,vy = self.turnLeft(cc)
# on the right
else:
#print>>sys.__stdout__, "turn right"
vx, vy = self.turnRight(2*pi-cc)
#print>>sys.__stdout__, "omega: "+ str(omega) + " angle: "+ str(angle) + " (omega-angle): " + str(omega-angle)
self.drive_handler.setVelocity(vx,vy, self.pose_handler.getPose()[2])
if omega - angle < pi/6 and omega - angle > -pi/6:
leaving = True
#Check whether the robot can leave now (the robot has to be closer to the goal than when it is at q_hit to leave)
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if not bool(pathOverlap):
#print "There is NO MORE obstacles in front for now."
# check if the robot is closer to the goal compared with q_hit
if norm(self.q_hit-mat(self.q_g).T) > norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T) :
#print "The robot is closer than the leaving point. The robot can leave"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
lala = 1
#print "not leaving bug algorithm. difference(-farther) =" + str(norm(self.q_hit-mat(self.q_g).T) - norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T))
"""
# Pass this desired velocity on to the drive handler
# Check if there are obstacles within 0.35m of the robot, if so, stop the robot
if self.system == 1:
if self.robocomm.getSTOP() == True:
vx = 0
vy = 0
"""
#vx = 0
#vy = 0
self.overlap = overlap
self.drive_handler.setVelocity(vx,vy, pose[2])
# Set the current region as the previous current region(for checking whether the robot has arrived at the next region)
self.previous_current_reg = current_reg
# check whether robot has arrived at the next region
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
departed = not self.currentRegionPoly.overlaps(self.realRobot)
arrived = self.nextRegionPoly.covers(self.realRobot)
if arrived:
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
print "WARNING: Left current region but not in expected destination region"
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
#print "I think I'm in " + r.name
#print pose
break
self.last_warning = time.time()
return arrived
def plotPioneer(self,number,y = 1):
"""
Plotting regions and obstacles with matplotlib.pyplot
number: figure number (see on top)
y = 0 : plot self.map_work instead of self.map
"""
if self.operate_system == 1:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
if self.system == 1:
if bool(self.robocomm.getObsPoly()):
self.plotPoly(self.robocomm.getObsPoly(),'k')
if self.operate_system == 1:
plt.figure(number).canvas.draw()
def turnLeft(self,a):
"""
Turn left with angle a
"""
vx = cos(self.pose_handler.getPose()[2]+a)* self.PioneerLengthHalf;
vy = sin(self.pose_handler.getPose()[2]+a)* self.PioneerLengthHalf;
return vx,vy
def turnRight(self,a):
"""
Turn right with angle a
"""
vx = cos(self.pose_handler.getPose()[2]-a)* self.PioneerLengthHalf;
vy = sin(self.pose_handler.getPose()[2]-a)* self.PioneerLengthHalf;
return vx,vy
def euclid(self,pt1, pt2):
"""
        Euclidean distance between pt1 and pt2 (used to find the closest point on the obstacle)
"""
pairs = zip(pt1, pt2) # Form pairs in corresponding dimensions
sum_sq_diffs = sum((a - b)**2 for a, b in pairs) # Find sum of squared diff
return (sum_sq_diffs)**(float(1)/2) # Take sqrt to get euclidean distance
def closest_pt(self,pt, vec):
"""
Returns the point in vec with minimum euclidean distance to pt
"""
return min(vec, key=lambda x: self.euclid(pt, x))
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
c = polygon to be plotted with matlabplot
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
def getOldRegionName(self, regionName):
"""
This function returns the old region name (eg: r1, r2, other etc) when given the new region name (eg: p1, p2..)
"""
for oldRegionName,newRegionNames in self.proj.regionMapping.iteritems():
if regionName in newRegionNames:
return oldRegionName
print 'Cannot find region with sub-region %s' % regionName
return None
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def data_gen(self):
self.ax.cla()
self.plotPioneer(1)
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
yield(pose[0],pose[1])
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
| gpl-3.0 |
awni/tensorflow | tensorflow/contrib/skflow/python/skflow/tests/test_multioutput.py | 1 | 1502 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error
import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
class MultiOutputTest(tf.test.TestCase):
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
regressor = skflow.TensorFlowLinearRegressor(learning_rate=0.01)
regressor.fit(X, y)
score = mean_squared_error(regressor.predict(X), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mgruben/morse-code | python/MorseCodeDecoder.py | 1 | 16481 | import re
import matplotlib.pyplot as plt
import seaborn as sns
MORSE_CODE = {
".-": "A",
"-...": "B",
"-.-.": "C",
"-..": "D",
".": "E",
"..-.": "F",
"--.": "G",
"....": "H",
"..": "I",
".---": "J",
"-.-": "K",
".-..": "L",
"--": "M",
"-.": "N",
"---": "O",
".--.": "P",
"--.-": "Q",
".-.": "R",
"...": "S",
"-": "T",
"..-": "U",
"...-": "V",
".--": "W",
"-..-": "X",
"-.--": "Y",
"--..": "Z",
"-----": "0",
".----": "1",
"..---": "2",
"...--": "3",
"....-": "4",
".....": "5",
"-....": "6",
"--...": "7",
"---..": "8",
"----.": "9",
"...---...": "SOS"
}
heyJude = ".... . -.--   .--- ..- -.. ."
JudeBits = "00011001100110011000000110000001111110011001111110011111100000000000000110011111100111111001111110000001100110011111100000011111100110011000000110000"
fuzzyBits = "0000000011011010011100000110000001111110100111110011111100000000000111011111111011111011111000000101100011111100000111110011101100000100000"
fuzzyTest = "00000000000000011111111000000011111111111100000000000111111111000001111111110100000000111111111111011000011111111011111111111000000000000000000011111111110000110001111111111111000111000000000001111111111110000111111111100001100111111111110000000000111111111111011100001110000000000000000001111111111010111111110110000000000000001111111111100001111111111110000100001111111111111100000000000111111111000000011000000111000000000000000000000000000011110001111100000111100000000111111111100111111111100111111111111100000000011110011111011111110000000000000000000000111111111110000000011111000000011111000000001111111111110000000001111100011111111000000000111111111110000011000000000111110000000111000000000011111111111111000111001111111111001111110000000000000000000001111000111111111100001111111111111100100000000001111111100111111110111111110000000011101111111000111000000001001111111000000001111111111000000000111100001111111000000000000011111111100111111110111111111100000000000111111110000001100000000000000000000111111101010000010000001111111100000000011111000111111111000000111111111110011111111001111111110000000011000111111110000111011111111111100001111100001111111100000000000011110011101110001000111111110000000001111000011111110010110001111111111000000000000000000111111111110000000100000000000000000011110111110000001000011101110000000000011111111100000011111111111100111111111111000111111111000001111111100000000000001110111111111111000000110011111111111101110001111111111100000000111100000111100000111111111100000111111111111000000011111111000000000001000000111100000001000001111100111111111110000000000000000000010001111111100000011111111100000000000000100001111111111110111001111111111100000111111100001111111111000000000000000000000000011100000111111111111011110000000010000000011111111100011111111111100001110000111111111111100000000000000111110000011111001111111100000000000011100011100000000000011111000001111111111101000000001110000000000000000000000000000111110010000000000111111111000011111111110000000000111111111111101111111111100000000010000000000000011111111100100001100000000000000111100111100000000001100000001111111111110000000011111111111000000000111100000000000000000000111101111111111111000000000001111000011111000011110000000001100111111100111000000000100111000000000000111110000010000011111000000000000001111111111100000000110111111111100000000000000111111111111100000111000000000111111110001111000000111111110111111000000001111000000000010000111111111000011110001111111110111110000111111111111000000000000000000000000111111111110000000111011111111100011111110000000001111111110000011111111100111111110000000001111111111100111111111110000000000110000000000000000001000011111111110000000001111111110000000000000000000000011111111111111000000111111111000001111111110000000000111111110000010000000011111111000011111001111111100000001110000000011110000000001011111111000011111011111111110011011111111111000000000000000000100011111111111101111111100000000000000001100000000000000000011110010111110000000011111111100000000001111100011111111111101100000000111110000011110000111111111111000000001111111111100001110111111111110111000000000011111111101111100011111111110000000000000000000000000010000111111111100000000001111111110111110000000000000000000000110000011110000000000001111111111100110001111111100000011100000000000111110000000011111111110000011111000001111000110000000011100000000000000111100001111111111100000111000000001111111111000000111111111100110000000001111000001111111100011100001111111110000010011111
111110000000000000000000111100000011111000001111000000000111111001110000000011111111000100000000000011111111000011001111111100000000000110111000000000000111111111111000100000000111111111110000001111111111011100000000000000000000000000"
class Cluster(object):
def __init__(self, loc):
self.currentPoints = []
self.centroid = None
self.previousPoints = []
self.location = loc
## Methods for claiming currentPoints and calculating centroid.
def addPoint(self, point):
self.currentPoints.append(point)
def didChange(self):
if len(self.currentPoints) != len(self.previousPoints):
return True
else:
return not (self.currentPoints == self.previousPoints)
def clearPoints(self):
self.previousPoints = self.currentPoints[:]
del self.currentPoints[:]
def update(self):
'''
After new points have been assigned to this cluster, this method
calculates the new centroid of the cluster and moves the cluster
to that location.
'''
if len(self.currentPoints) > 0:
s = 0.0
for p in self.currentPoints:
s += p
self.centroid = s / len(self.currentPoints)
self.location = self.centroid
## Getter methods.
def getLocation(self):
return self.location
def getDistance(self, point):
return abs(self.location - point)
## Printer methods.
def printCentroid(self):
print(self.centroid)
def printLocation(self):
print(self.location)
def printPoints(self):
result = ""
for point in self.currentPoints:
result += str(point) + " "
print(result[:-1])
def printPreviousPoints(self):
result = ""
for point in self.previousPoints:
result += str(point) + " "
print(result[:-1])
class KMeans(object):
def __init__(self, stream, numClusters):
self.clusters = []
self.bitCollection = []
self.timeUnits = [0,0,0]
self.dist = {}
self.keys = []
self.converged = False
stream = stream.strip("0")
## Populate this.bitCollection.
if len(stream) == 0:
self.bitCollection.append("")
else:
ones = re.split("0+", stream)
zeros = re.split("1+", stream)
if len(zeros) == 0:
self.bitCollection.append(ones[0])
else:
for i in range(len(ones) - 1):
self.bitCollection.append(ones[i])
self.bitCollection.append(zeros[i + 1])
self.bitCollection.append(ones[-1])
## Populate this.dist.
for bit in self.bitCollection:
l = len(bit)
if l in self.dist:
self.dist[l] += 1
else:
self.dist[l] = 1
self.keys = sorted(self.dist.keys())
## Handle short inputs (i.e. displays fewer than three timing units)
if len(self.keys) == 1 or len(self.keys) == 2:
self.timeUnits[0] = self.keys[0]
self.timeUnits[1] = self.keys[0] * 3
self.timeUnits[2] = self.keys[0] * 7
self.converged = True
## Handle long inputs via KMeans
else:
self.initializeClusters()
def initializeClusters(self):
'''
Populates this.clusters with this.numClusters Cluster objects,
whose initial locations are from this.keys (the minimum, the
maximum, and the middle between the two).
'''
self.clusters.append(Cluster(float(self.keys[0])))
self.clusters.append(Cluster((float(self.keys[0]) + float(self.keys[-1])) / 2))
self.clusters.append(Cluster(float(self.keys[-1])))
def assignToClosestCluster(self):
'''
Assigns cluster-labels to each length-point from the fuzzy input,
which is subsequently used by the clusters to re-calculate their
centroids and move accordingly.
'''
self.clear()
for key in self.keys:
bestCluster = Cluster(5000)
closest = 10000000.0
for c in self.clusters:
d = c.getDistance(key)
if d < closest:
closest = d
bestCluster = c
for i in range(self.dist[key]):
bestCluster.addPoint(key)
def calculateTimeUnits(self):
for i in range(3):
self.timeUnits[i] = self.clusters[i].getLocation()
def clear(self):
for c in self.clusters:
c.clearPoints()
def converge(self):
'''
Assigns the closest Cluster to each point, calculates the centroid
for those Clusters based off of those points, moves the Clusters
to their respective centroids, and repeats until assignment on the next
iteration is the same.
'''
if not self.converged:
self.assignToClosestCluster()
while not self.converged:
self.update()
self.assignToClosestCluster()
if not self.didChange():
self.converged = True
self.calculateTimeUnits()
def didChange(self):
for c in self.clusters:
if c.didChange():
return True
return False
def update(self):
for c in self.clusters:
c.update()
## Getter methods.
def getTimeUnit(self, index):
return self.timeUnits[index]
## Printer methods.
def printBitCollection(self):
for bit in self.bitCollection:
print(bit)
def printClusterPoints(self):
for c in self.clusters:
print("Points for cluster at " + str(c.getLocation()))
c.printPoints()
def printClusters(self):
for c in self.clusters:
print(c.getLocation())
def printDidChange(self):
		print(self.didChange())
def printDistances(self):
for key in self.keys:
best = -1.0
closest = 10000000.0
for c in self.clusters:
d = c.getDistance(key)
print("From cluster at " + str(c.getLocation()) + \
"to point at " + str(key) + " is: " + str(d))
if d < closest:
closest = d
best = c.getLocation()
print("Closest to: " + str(best))
def printDistribution(self):
for key in self.keys:
print("Length: " + str(key) + " occurred " + str(self.dist[key]) + " times")
def printKeys(self):
for key in self.keys:
print(key)
def printTimeUnits(self):
for t in self.timeUnits:
print(t)
## Plotter methods.
def plotDistribution(self):
xmax = max(self.keys)
ymax = max(self.dist.values())
plt.figure()
plt.bar(self.dist.keys(), self.dist.values())
plt.title("Bit Length Frequencies")
plt.xlabel("Number of Characters")
plt.ylabel("Frequency")
plt.axis([0, xmax, 0, ymax])
plt.axvline((self.getTimeUnit(0) + self.getTimeUnit(1)) / 2, color='b', linestyle='dashed', linewidth=2)
plt.axvline((self.getTimeUnit(1) + self.getTimeUnit(2)) / 2, color='b', linestyle='dashed', linewidth=2)
plt.show()
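# Usage sketch for the KMeans helper above (this is what decodeBitsAdvanced does):
# km = KMeans(fuzzyBits, 3); km.converge()
# km.getTimeUnit(0), km.getTimeUnit(1) and km.getTimeUnit(2) then estimate the
# 1-, 3- and 7-unit run lengths of the fuzzy bit stream.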
# Required as per problem API
def decodeBitsAdvanced(fuzzyBits):
'''
input bits, a string of 0s and 1s with variable timing
returns string, a morse code message
'''
morse = ""
fuzzyBits = fuzzyBits.strip("0")
km = KMeans(fuzzyBits, 3)
km.converge()
thresh13 = (km.getTimeUnit(0) + km.getTimeUnit(1)) / 2
thresh37 = (km.getTimeUnit(1) + km.getTimeUnit(2)) / 2
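	# thresh13 / thresh37 sit halfway between the estimated 1- and 3-unit and the
	# 3- and 7-unit cluster centroids; each run length is classified against them.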
ones = re.split("0+", fuzzyBits)
zeros = re.split("1+", fuzzyBits)
for i in range(len(zeros) - 1):
morse += nextTelePairFuzzy(ones[i], zeros[i + 1], thresh13, thresh37)
return morse
# Required as per problem API
def decodeBits(bits):
'''
input bits, a string of 0s and 1s with fixed timing
returns string, a morse code message
'''
morse = ""
bits = bits.strip("0")
tu = getTimeUnit(bits)
ones = re.split("0+", bits)
zeros = re.split("1+",bits)
for i in range(len(zeros) - 1):
morse += nextTelePair(ones[i], zeros[i + 1], tu)
return morse
# Required as per problem API
def decodeMorse(morseCode):
'''
input morseCode, a string of dots, dashes, and spaces
returns string, a human-readable message
'''
result = ""
	morseCode = morseCode.replace("   ", " SPACE ")  # word gaps are three spaces in the morse string
morses = morseCode.split()
for morse in morses:
if morse == "SPACE":
result += " "
else:
try:
result += MORSE_CODE[morse]
except KeyError:
result += "(KEYERR: "+morse+")"
return result
# Helper function for 3 of 3 in the series
def nextTelePairFuzzy(one, zero, thresh13, thresh37):
tele = nextTeleSingleFuzzy(one, thresh13)
if len(zero) >= thresh13 and len(zero) < thresh37:
tele += " "
elif len(zero) >= thresh37:
		tele += "   "  # word gap
return tele
# Helper function for 3 of 3 in the series
def nextTeleSingleFuzzy(one, thresh13):
tele = ""
if len(one) <= thresh13:
tele += "."
else:
tele += "-"
return tele
# Helper function for 2 of 3 in the series
def nextTelePair(one, zero, tu):
tele = nextTeleSingle(one, tu)
if len(zero) == 3 * tu:
tele += " "
elif len(zero) == 7 * tu:
		tele += "   "  # word gap (7 time units)
return tele
# Helper function for 2 of 3 in the series
def nextTeleSingle(one, tu):
tele = ""
if len(one) == tu:
tele += "."
elif len(one) == 3 * tu:
tele += "-"
return tele
# Helper function for 2 of 3 in the series
def getTimeUnit(bits):
'''
input bits, a string of 0s and 1s with fixed timing
returns int, the single timing unit (i.e. dot or shortest pause)
'''
if bits == "":
return 0
o = re.split("0+", bits)
if len(o) == 1:
return len(bits)
if o == ['','']:
return 0
os = len(o[0])
for elem in o:
if len(elem) != os:
os = min(os,len(elem))
break
z = re.split("1+", bits)
zs = len(z[1])
for i in range(1, len(z) - 1):
if zs != len(z[i]):
zs = min(zs, len(z[i]))
break
return min(os, zs)
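# e.g. getTimeUnit("110011") == 2: the shortest runs of 1s and of 0s are both two characters long.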
# Function to rapidly iterate through possible thresholds;
# not a valid way of solving 3 of 3.
def bruteThreshholds(fuzzyBits):
fuzzyBits = fuzzyBits.strip("0")
f = open('BruteForceDump', 'w')
ones = re.split("0+", fuzzyBits)
zeros = re.split("1+", fuzzyBits)
lowerStart = 7
lowerStop = 8 # exclusive
lowerStep = 0.25
upperStart = 15
upperStop = 16 # exclusive
upperStep = 0.25
for lower in range(0, int((lowerStop - lowerStart) / lowerStep), 1):
for upper in range(0, int((upperStop - upperStart) / upperStep), 1):
morse = ""
for i in range(len(zeros) - 1):
morse += nextTelePairFuzzy(ones[i], zeros[i + 1],
lowerStart + lower * lowerStep, upperStart + upper * upperStep)
f.write(str(lowerStart + lower * lowerStep) + " " + str(upperStart + upper * upperStep) + '\n')
f.write(decodeMorse(morse))
f.write('\n\n')
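# Minimal self-test sketch: runs the three decoding stages on the sample data
# defined above. The morse and fixed-timing examples should both decode to
# "HEY JUDE"; the fuzzy result depends on the k-means threshold estimates.
if __name__ == "__main__":
	print(decodeMorse(heyJude))                        # morse -> text
	print(decodeMorse(decodeBits(JudeBits)))           # fixed-timing bits -> morse -> text
	print(decodeMorse(decodeBitsAdvanced(fuzzyBits)))  # fuzzy bits -> morse -> text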
| gpl-3.0 |
toobaz/pandas | pandas/tests/indexes/timedeltas/test_setops.py | 2 | 6937 | import numpy as np
import pytest
import pandas as pd
from pandas import Int64Index, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
from pandas.tseries.offsets import Hour
class TestTimedeltaIndex:
def test_union(self):
i1 = timedelta_range("1day", periods=5)
i2 = timedelta_range("3day", periods=5)
result = i1.union(i2)
expected = timedelta_range("1day", periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = timedelta_range(start="1 day", periods=10, freq="D")
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(["3d", "1d", "2d"])
ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range("1 day", periods=4, freq="3H")
rng_b = timedelta_range("1 day", periods=4, freq="4H")
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(["1 day 15:19:49.695000"])
right = TimedeltaIndex(
["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_equal(self, sort):
# GH 24471 Test intersection outcome given the sort keyword
        # for equal indices intersection should return the original index
first = timedelta_range("1 day", periods=4, freq="h")
second = timedelta_range("1 day", periods=4, freq="h")
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_zero_length(self, period_1, period_2, sort):
# GH 24471 test for non overlap the intersection should be zero length
index_1 = timedelta_range("1 day", periods=period_1, freq="h")
index_2 = timedelta_range("1 day", periods=period_2, freq="h")
expected = timedelta_range("1 day", periods=0, freq="h")
result = index_1.intersection(index_2, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_zero_length_input_index(self, sort):
# GH 24966 test for 0-len intersections are copied
index_1 = timedelta_range("1 day", periods=0, freq="h")
index_2 = timedelta_range("1 day", periods=3, freq="h")
result = index_1.intersection(index_2, sort=sort)
assert index_1 is not result
assert index_2 is not result
tm.assert_copy(result, index_1)
@pytest.mark.parametrize(
"rng, expected",
# if target has the same name, it is preserved
[
(
timedelta_range("1 day", periods=5, freq="h", name="idx"),
timedelta_range("1 day", periods=4, freq="h", name="idx"),
),
# if target name is different, it will be reset
(
timedelta_range("1 day", periods=5, freq="h", name="other"),
timedelta_range("1 day", periods=4, freq="h", name=None),
),
# if no overlap exists return empty index
(
timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
TimedeltaIndex([], name="idx"),
),
],
)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, rng, expected, sort):
# GH 4690 (with tz)
base = timedelta_range("1 day", periods=4, freq="h", name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
@pytest.mark.parametrize(
"rng, expected",
# part intersection works
[
(
TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
),
# reordered part intersection
(
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
            # reversed index
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
],
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
),
],
)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_non_monotonic(self, rng, expected, sort):
# 24471 non-monotonic
base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
        # if reversed order, frequency is still the same
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
assert result.freq is None
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
sanja7s/SR_Twitter | src_general/explain_FORMATION_DELETION_REL.py | 1 | 6415 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from pylab import *
width = 0.28 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
fig = gcf()
def plot_bars_FORMATION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 0))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='darkred', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='lightcoral', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Formed and persisting', \
'Formed and non-persisting', 'Persisting average'),\
frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At formation', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
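# Note on the layout above: with align='center' the three series are centred at
# ind - width, ind and ind + width, so each group shows its "persisting",
# "non-persisting" and "average" bars side by side. A minimal sketch of those
# centres (same `width` as above, purely illustrative):
#   ind = np.arange(N)
#   bar_centers = [ind - width, ind, ind + width]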
N = 3
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
formationDeletionMeans = (1.12747979427, 1.56808719079, 1.62160176341)
formationDeletionStd = (1.35650452374, 1.71205560699, 1.83913259462)
# PERSISTING LINKS
# STRONG contacts REL
formationNodeletionMeans = (0.964889222681, 1.44874202028, 1.68794592565)
formationNodeletionStd = (1.30256068643, 1.64860382968, 1.94388833634)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_FORMATION_STRONG_REL(formationNodeletionMeans, formationNodeletionStd,\
formationDeletionMeans, formationDeletionStd, SRMeansS, SRStdS)
def plot_bars_DELETION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 1))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='c', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='cyan', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Persisting decommissioned', \
'Non-persisting decommissioned', 'Persisting average'),\
loc='best',frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
#deletionFormationMeans = (1.35860783095, 1.40335612181, 1.38222498446)
#deletionFormationStd = (1.39698763227, 1.515042018, 1.6001731639)
deletionFormationMeans = (1.21614009307, 1.58645603723, 1.613397012)
deletionFormationStd = (1.39228801763, 1.73298601092, 1.84822380219)
# PERSISTING LINKS
#deletionNoformationMeans = (1.16101995042, 1.52591193484, 1.54066816196)
#deletionNoformationStd = (1.36105887603, 1.69996084625, 1.80123581372)
deletionNoformationMeans = (1.09195402299, 1.16457680251, 1.09717868339)
deletionNoformationStd = (1.25857893939, 1.33146910699, 1.31900439894)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_DELETION_STRONG_REL(deletionNoformationMeans, deletionNoformationStd,\
deletionFormationMeans, deletionFormationStd, SRMeansS, SRStdS)
##########################################################################
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(12.4,4.5)
plt.tight_layout()
#plt.figtext(0.20, 0.49, 'Relative status of the pair: weak contacts')
#plt.figtext(0.27, 0.973, 'Relative status of the pair: strong contacts')
fig.suptitle('Relative status (strong contacts)', verticalalignment='center', horizontalalignment='center', size = 16)
#fig.suptitle('Sum including weak contacts', verticalalignment='center', y=0.5, horizontalalignment='center', size = 16)
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/explain_FORMATION_DELETION_REL.eps", dpi=710)
| mit |
MalkIPP/ipp_work | ipp_work/example/tax_rate_by_decile.py | 1 | 1569 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 17:07:33 2015
@author: malkaguillot
"""
from ipp_work.utils import survey_simulate, df_weighted_average_grouped
from ipp_work.simulations.ir_marg_rate import varying_survey_simulation
from ipp_work.example.quantiles_of_revimp import make_weighted_deciles_of_variable
import pandas
year = 2009
ind_variables = ['idmen', 'quimen', 'idfoy', 'salaire_imposable', 'salaire_net']
foy_variables = ['irpp', 'decile_rfr', 'weight_foyers', 'idfoy_original', 'rfr']
used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois', 'smic55']
df_by_entity_key_plural, simulation = survey_simulate(used_as_input_variables, year, ind_variables,
foy_variables = foy_variables)
df_individus = df_by_entity_key_plural['individus']
df_foyers = df_by_entity_key_plural['foyers']
tax_rates = varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni',
used_as_input_variables = used_as_input_variables)
tax_rates = tax_rates[['idfoy_original', 'marginal_rate', 'average_rate']]
df_foyers = pandas.merge(df_foyers, tax_rates, on = 'idfoy_original')
make_weighted_deciles_of_variable(df_foyers, 'rfr', 'weight_foyers', 100)
Wconcat = df_weighted_average_grouped(
dataframe = df_foyers,
groupe = 'decile_of_rfr',
varlist = [
'marginal_rate', 'average_rate'
],
)
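# df_weighted_average_grouped comes from ipp_work.utils and is not shown here;
# judging by its arguments, Wconcat presumably contains one row per
# 'decile_of_rfr' with the weighted means of 'marginal_rate' and 'average_rate'.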
print Wconcat
df_foyers['decile_rfr'].count()
df_foyers['rfr'].describe()
df_foyers['weight_foyers'].describe() | agpl-3.0 |
MarkRegalla27/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
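# How these pieces fit together: thinkstats2.HypothesisTest (not shown here)
# presumably computes the observed TestStatistic once, then calls RunModel
# repeatedly and counts how often the simulated statistic reaches it.
# A sketch of that usage, assuming that interface:
#   ht = DiffMeansPermute((group1, group2))
#   p_value = ht.PValue(iters=1000)   # share of permutations >= observed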
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
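# Interpretation: count / num_runs is the fraction of resampled experiments in
# which the permutation test misses the effect at the 0.05 level, i.e. the
# false negative rate; statistical power is roughly 1 minus this value.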
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| gpl-3.0 |
bikash/kaggleCompetition | microsoft malware/code/_untuned_modeling.py | 1 | 5556 | ######################################################
# _untuned_modeling.py
# author: Gert Jacobusse, gert.jacobusse@rogatio.nl
# licence: FreeBSD
"""
Copyright (c) 2015, Gert Jacobusse
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#first run feature_extraction.py
#then run this file from the same directory
######################################################
# import dependencies
import csv
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.ensemble import GradientBoostingClassifier,ExtraTreesClassifier
from sklearn.metrics import log_loss
######################################################
# list ids and labels
trainids=[]
labels=[]
with open('trainLabels.csv','r') as f:
r=csv.reader(f)
r.next() # skip header
for row in r:
trainids.append(row[0])
labels.append(float(row[1]))
testids=[]
with open('sampleSubmission.csv','r') as f:
r=csv.reader(f)
r.next()
for row in r:
testids.append(row[0])
######################################################
# general functions
def readdata(fname,header=True,selectedcols=None):
with open(fname,'r') as f:
r=csv.reader(f)
names = r.next() if header else None
if selectedcols:
assert header==True
data = [[float(e) for i,e in enumerate(row) if names[i] in selectedcols] for row in r]
names = [name for name in names if name in selectedcols]
else:
data = [[float(e) for e in row] for row in r]
return data,names
def writedata(data,fname,header=None):
with open(fname,'w') as f:
w=csv.writer(f)
if header:
w.writerow(header)
for row in data:
w.writerow(row)
######################################################
# cross validation
"""
function docv
input: classifier, kfolds object, features, labels, number of data rows
output: holdout-set-predictions for all rows
* run cross validation
"""
def docv(clf,kf,x,y,nrow,nlab=9):
pred = np.zeros((nrow,nlab))
for trainidx, testidx in kf:
clf.fit(x[trainidx],y[trainidx])
pred[testidx] = clf.predict_proba(x[testidx])
return pred
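# Each row of `pred` is filled exactly once, by the model whose training fold
# excluded that row, so the result is a full set of out-of-fold (holdout)
# class probabilities (nlab=9 columns per row), usable for estimating log loss
# without leakage.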
"""
function runcv
input: name of train/ test file, classifier 1 and 2 to be used
output: writes holdout-set-predictions for all rows to file
* run cross validation by calling docv for both classifiers, combine and save results
"""
def runcv(filename,c1,c2):
y=np.array(labels)
nrow=len(y)
x,_=readdata('train_%s'%filename)
x=np.array(x)
kf = KFold(nrow,10,shuffle=True)
p1=docv(c1,kf,x,y,nrow)
p2=docv(c2,kf,x,y,nrow)
pcombi=0.667*p1+0.333*p2
print '%.4f %.4f %.4f'%(log_loss(y,p1),log_loss(y,p2),log_loss(y,pcombi))
with open('pred_%s'%filename,'w') as f:
w=csv.writer(f)
for row in pcombi:
w.writerow(row)
######################################################
# submit and print feature importance
"""
function writesubm
input: name of train/ test file, classifier 1 and 2 to be used
output: writes testset predictions to file
* train classifiers using all traindata, create testset predictions, combine and save results
"""
def writesubm(filename,c1,c2):
xtrain,names=readdata('train_%s'%filename)
xtest,_=readdata('test_%s'%filename)
c1.fit(xtrain,labels)
c2.fit(xtrain,labels)
p1=c1.predict_proba(xtest)
p2=c2.predict_proba(xtest)
p=0.667*p1+0.333*p2
with open('subm_%s'%filename,'w') as f:
w=csv.writer(f)
w.writerow(['Id']+['Prediction%d'%num for num in xrange(1,10)])
for inum,i in enumerate(testids):
w.writerow([i]+list(p[inum]))
######################################################
# go
if __name__ == '__main__':
gbm=GradientBoostingClassifier(
n_estimators=400, max_features=5)
xtr=ExtraTreesClassifier(
n_estimators=400,max_features=None,
min_samples_leaf=2,min_samples_split=3,
n_jobs=7)
for filename in [
'45c.csv',
]:
print filename
runcv(filename,gbm,xtr)
writesubm(filename,gbm,xtr)
print ''
"""
45c.csv
0.0117 0.0168 0.0101
public LB: 0.008071379
private LB: 0.007615772
""" | apache-2.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/svm/plot_separating_hyperplane.py | 62 | 1274 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
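# Why this works: with a linear kernel the decision boundary is
# w[0]*x0 + w[1]*x1 + b = 0, where b = clf.intercept_[0]; solving for x1 gives
# x1 = -(w[0]/w[1])*x0 - b/w[1], i.e. the slope `a` and offset used above.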
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
MadsJensen/malthe_alpha_project | source_connectivity_permutation.py | 1 | 6505 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
"""
Permutation test.
    Return the p-value that the statistic for case is different
    from the statistic for control.
"""
observed_diff = abs(statistic(case) - statistic(control))
num_case = len(case)
combined = np.concatenate([case, control])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
diffs.append(diff)
pval = (np.sum(diffs > observed_diff) +
np.sum(diffs < -observed_diff))/float(num_samples)
return pval, observed_diff, diffs
def permutation_test(a, b, num_samples, statistic):
"""
Permutation test.
    Return the p-value that the statistic for a is different
    from the statistic for b.
"""
observed_diff = abs(statistic(b) - statistic(a))
num_a = len(a)
combined = np.concatenate([a, b])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_a]) - np.mean(xs[num_a:])
diffs.append(diff)
pval = np.sum(np.abs(diffs) >= np.abs(observed_diff)) / float(num_samples)
return pval, observed_diff, diffs
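# A minimal usage sketch (hypothetical arrays, not data from this study):
#   a = npr.normal(0.0, 1.0, 50)
#   b = npr.normal(0.5, 1.0, 50)
#   pval, obs, diffs = permutation_test(a, b, 10000, np.mean)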
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
# change dir to save files the rigth place
os.chdir(data_path)
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
# Parameters
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
#labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
regexp="Brodmann",
subjects_dir=subjects_dir)
labels_occ = labels[6:12]
# labels = mne.read_labels_from_annot('subject_1', parc='aparc.DKTatlas40',
# subjects_dir=subjects_dir)
for cond in epochs.event_id.keys():
stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
method, pick_ori="normal")
exec("stcs_%s = stcs" % cond)
labels_name = [label.name for label in labels_occ]
# Extract time series
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(stcs_all_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip"))
number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12
tmin, tmax = 0, 1
con_method = "plv"
diff_permuatation = np.empty([6, 6, number_of_permutations])
# diff
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
ts_ctl_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
spectral_connectivity(
ts_ent_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
diff = con_ctl[:, :, 0] - con_ent[:, :, 0]
for i in range(number_of_permutations):
index = np.random.permutation(index)
tmp_ctl = ts_all_left[index[:64], :, :]
tmp_case = ts_all_left[index[64:], :, :]
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
tmp_ctl,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
spectral_connectivity(
tmp_case,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
for j in range(diff.shape[1]):
if diff[h, j] != 0:
            pval[h, j] = np.sum(np.abs(diff_permuatation[h, j, :]) >=
                                np.abs(diff[h, j])) / float(number_of_permutations)
| mit |
gtesei/fast-furious | competitions/santander-customer-transaction-prediction/base_light_gbm1.py | 1 | 2556 | import lightgbm as lgb
import pandas as pd
import numpy as np
import sys
from datetime import datetime
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
path=Path("data/")
train=pd.read_csv(path/"train.csv").drop("ID_code",axis=1)
test=pd.read_csv(path/"test.csv").drop("ID_code",axis=1)
param = {
'boost_from_average':'false',
'bagging_fraction': 0.5,
'boost': 'gbdt',
'feature_fraction': 0.02,
'learning_rate': 0.001,
'max_depth': 6,
'metric':'auc',
'min_data_in_leaf': 100,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 13,
'n_jobs': 30,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1
}
result=np.zeros(test.shape[0])
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=5,random_state=10)
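# With n_splits=5 and n_repeats=5 this yields 25 (train, valid) splits, so
# `counter` runs from 1 to 25 and `result/counter` below averages the test
# predictions over all 25 fitted models.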
best_iteration , best_valid_auc = 0, 0
for counter,(train_index, valid_index) in enumerate(rskf.split(train, train.target),1):
print ("Rep-Fold:",counter)
sys.stdout.flush()
#Train data
t=train.iloc[train_index]
trn_data = lgb.Dataset(t.drop("target",axis=1), label=t.target)
#Validation data
v=train.iloc[valid_index]
val_data = lgb.Dataset(v.drop("target",axis=1), label=v.target)
#Training
model = lgb.train(param, trn_data, 1000000, feature_name=train.columns.tolist()[1:], valid_sets = [trn_data, val_data], verbose_eval=500, early_stopping_rounds = 4000)
result +=model.predict(test)
## feat imp
gain = model.feature_importance('gain')
ft = pd.DataFrame({'feature':train.columns.tolist()[1:],'split':model.feature_importance('split'),'gain':100 * gain / gain.sum()}).sort_values('gain', ascending=False)
print("************ FEAT IMPORTANCE *****************")
print(ft.head(25))
print()
##
_best_valid_auc = model.best_score['valid_1']['auc']
_best_iteration = model.best_iteration
print("best_iteration:",_best_iteration,"- best_valid_auc:",_best_valid_auc )
best_valid_auc +=_best_valid_auc
best_iteration += _best_iteration
submission = pd.read_csv(path/'sample_submission.csv')
submission['target'] = result/counter
filename="{:%Y-%m-%d_%H_%M}_sub_after_tune.csv".format(datetime.now())
submission.to_csv(filename, index=False)
## feat importance
best_valid_auc = best_valid_auc/counter
best_iteration = best_iteration/counter
fh = open("base_light_gbm1.log","w")
print("best_iteration_avg:",best_iteration,"- best_valid_auc_avg:",best_valid_auc,file=fh)
fh.close()
| mit |
DucQuang1/py-earth | doc/generate_figures.py | 1 | 1933 | import matplotlib as mpl
mpl.use('Agg')
import numpy
from pyearth import Earth
from matplotlib import pyplot
#=========================================================================
# V-Function Example
#=========================================================================
# Create some fake data
numpy.random.seed(0)
m = 1000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y = numpy.abs(X[:, 6] - 4.0) + 1 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth()
model.fit(X, y)
# Print the model
print model.trace()
print model.summary()
# Plot the model
y_hat = model.predict(X)
pyplot.figure()
pyplot.plot(X[:, 6], y, 'r.')
pyplot.plot(X[:, 6], y_hat, 'b.')
pyplot.xlabel('x_6')
pyplot.ylabel('y')
pyplot.title('Simple Earth Example')
pyplot.savefig('simple_earth_example.png')
#=========================================================================
# Hinge plot
#=========================================================================
from xkcdify import XKCDify
x = numpy.arange(-10, 10, .1)
y = x * (x > 0)
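# x * (x > 0) is a vectorized max(0, x): the basic hinge function with its knot
# t at 0, which is why the annotation below marks 'x=t' at the origin.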
fig = pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.title('Basic Hinge Function')
pyplot.xlabel('x')
pyplot.ylabel('h(x)')
pyplot.annotate('x=t', (0, 0), xytext=(-30, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('hinge.png')
#=========================================================================
# Piecewise Linear Plot
#=========================================================================
m = 1000
x = numpy.arange(-10, 10, .1)
y = 1 - 2 * (1 - x) * (x < 1) + 0.5 * (x - 1) * (x > 1)
pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('Piecewise Linear Function')
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('piecewise_linear.png')
| bsd-3-clause |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
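# Examples of what this parser returns (traced from the code above):
#   'ko'  -> ('None', 'o', 'k')
#   'r--' -> ('--', 'None', 'r')
#   '.b'  -> ('None', '.', 'b')
# A bare color spec such as 'g' returns early with linestyle and marker None.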
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
        # in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
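    # Example of how arguments are consumed (traced from the generator above):
    # plot(x1, y1, 'g^', x2, y2) is split into the triple (x1, y1, 'g^'),
    # handled by _plot_3_args, followed by the pair (x2, y2), handled by
    # _plot_2_args -- which is how an arbitrary number of x, y, fmt groups
    # is supported.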
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
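# Usage sketch for set_aspect (illustrative only; assumes `ax` already holds
# plotted data, and the 'datalim'/'C' choices are arbitrary examples):
#
#     ax.set_aspect('equal', adjustable='datalim', anchor='C')
#     ax.apply_aspect()   # normally run for you at draw time; forces it now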
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
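# Usage sketch for the axis() convenience method (illustrative only; assumes
# `ax` already has data and the numeric limits are arbitrary):
#
#     ax.axis('tight')          # fit the view limits tightly to the data
#     ax.axis('equal')          # equal data scaling via adjustable='datalim'
#     ax.axis([0, 4, 0, 10])    # explicit [xmin, xmax, ymin, ymax]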
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
ACCEPTS: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
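# Usage sketch for autoscale_view (illustrative only; assumes `ax` has lines
# or patches contributing to its data limits):
#
#     ax.set_autoscale_on(True)
#     ax.autoscale_view(tight=True, scalex=True, scaley=False)
#     # only the x view limits are recomputed from the (shared) data limits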
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10**-m to 10**n.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
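# Usage sketch for set_xlim (illustrative only; assumes `ax` exists and the
# numeric limits are arbitrary):
#
#     ax.set_xlim(0.0, 10.0)    # set both limits
#     ax.set_xlim(xmin=2.0)     # move only the lower limit; xmax is unchanged
#     # with emit=True (the default) any x-shared axes are updated as well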
def get_xscale(self):
'Return the xaxis scale string: %s'
return self.xaxis.get_scale()
get_xscale.__doc__ = get_xscale.__doc__ % (
", ".join(mscale.get_scale_names()))
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the major xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the minor xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
self._process_unit_info(ydata=(ymin, ymax))
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'Return the yaxis scale string: %s'
return self.yaxis.get_scale()
get_yscale.__doc__ = get_yscale.__doc__ % (
", ".join(mscale.get_scale_names()))
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the major ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the minor ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
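# Usage sketch for xaxis_date (illustrative only; assumes a pyplot Axes `ax`
# and arbitrary dates/values):
#
#     import datetime
#     from matplotlib import dates as mdates
#     days = [datetime.date(2009, 1, d) for d in (1, 2, 3)]
#     ax.plot(mdates.date2num(days), [1, 4, 9], 'o')
#     ax.xaxis_date()   # install a date locator/formatter on the x-axis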
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* formatted as a string. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c =mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. The
callback function must have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, mtext.Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, mpatches.Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the overrides and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the overrides and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the overrides and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
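# Usage sketch for the labelling helpers (illustrative only; assumes `ax`
# exists and the strings/sizes are arbitrary):
#
#     ax.set_title('response curve', fontsize=12)
#     ax.set_xlabel('time (s)')
#     ax.set_ylabel('amplitude')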
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
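# Usage sketch for text() in axes coordinates (illustrative only; assumes
# `ax` exists and the string/box styling is arbitrary):
#
#     ax.text(0.5, 0.5, 'matplotlib',
#             horizontalalignment='center', verticalalignment='center',
#             transform=ax.transAxes,
#             bbox=dict(facecolor='red', alpha=0.5))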
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
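# Usage sketch for annotate (illustrative only; assumes `ax` has data near the
# annotated point and the coordinates are arbitrary):
#
#     ax.annotate('local max', xy=(2.0, 1.0), xytext=(3.0, 1.5),
#                 arrowprops=dict(facecolor='black', shrink=0.05))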
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
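# Usage sketch for axhline/axvline (illustrative only; assumes `ax` exists and
# the positions/colors are arbitrary):
#
#     ax.axhline(y=0.5, color='r', linewidth=2)     # spans the full x extent
#     ax.axvline(x=1.0, ymin=0.25, ymax=0.75)       # middle half of the height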
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
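# Usage sketch for the span helpers (illustrative only; assumes `ax` exists
# and the extents/colors are arbitrary):
#
#     ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)   # horizontal band
#     ax.axvspan(1.25, 1.55, facecolor='g', alpha=0.5)     # vertical band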
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError, 'xmin and y are unequal sized sequences'
if len(xmax)!=len(y):
raise ValueError, 'xmax and y are unequal sized sequences'
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError, 'ymin and x are unequal sized sequences'
if len(ymax)!=len(x):
raise ValueError, 'ymax and x are unequal sized sequences'
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
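# Usage sketch for vlines (illustrative only; assumes `ax` exists and the data
# are arbitrary):
#
#     import numpy as np
#     x = np.arange(5)
#     ax.vlines(x, [0], x ** 2, colors='k', linestyles='dashed')
#     # one vertical segment per x value, from 0 up to x**2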
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12). See
:class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
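# Usage sketch (illustrative only): a minimal Axes.plot call exercising a
# format string plus Line2D keyword properties; assumes the standard
# pyplot setup and arbitrary data.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   t = np.linspace(0.0, 2.0, 50)
#   ax.plot(t, np.sin(2 * np.pi * t), 'go-', label='sine', linewidth=2)
#   plt.show()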
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
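# Usage sketch (illustrative only): plotting against dates given as floats
# produced by matplotlib.dates.date2num, as the docstring above describes;
# the date range here is arbitrary.
#
#   import datetime
#   import matplotlib.pyplot as plt
#   from matplotlib.dates import date2num
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   days = [datetime.date(2008, 1, d) for d in range(1, 11)]
#   ax.plot_date(date2num(days), range(10), 'bo')
#   plt.show()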
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
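# Usage sketch (illustrative only): a log-log plot with a non-default x
# base, assuming the standard pyplot setup; the data are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   t = np.arange(1.0, 100.0)
#   ax.loglog(t, t**2, basex=2)
#   plt.show()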
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (the default applies no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and *y* are detrended by the *detrend* callable
(the default applies no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` cross correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
                 'positive and < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
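# Usage sketch (illustrative only): cross correlation of two random
# signals drawn with vlines; the data are random and the maxlags choice
# is arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   x, y = np.random.randn(2, 500)
#   lags, c, linecol, baseline = ax.xcorr(x, y, usevlines=True,
#                                         maxlags=50, normed=True)
#   plt.show()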
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
If none of these locations are suitable, *loc* can be a 2-tuple
giving *x*, *y* in axes coords, i.e.::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
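# Usage sketch (illustrative only): building a legend from line labels,
# as described in the docstring above; labels and location are arbitrary.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   ax.plot([1, 2, 3], [1, 2, 3], 'g^', label='line 1')
#   ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
#   ax.legend(loc='upper left')
#   plt.show()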
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
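# Usage sketch (illustrative only): a step plot with the level held until
# the next x value ('post'); the data are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   x = np.arange(10)
#   ax.step(x, np.sin(x), where='post')
#   plt.show()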
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# proper input validation: raise ValueError rather than relying on
# assert, which is stripped when python runs with -O
if len(left)!=nbars:
raise ValueError("argument 'left' must be %d or scalar" % nbars)
if len(height)!=nbars:
raise ValueError("argument 'height' must be %d or scalar" % nbars)
if len(width)!=nbars:
raise ValueError("argument 'width' must be %d or scalar" % nbars)
if len(bottom)!=nbars:
raise ValueError("argument 'bottom' must be %d or scalar" % nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
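# Usage sketch (illustrative only): a simple vertical bar chart with
# symmetric error bars; the heights and errors are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   left = np.arange(5)
#   ax.bar(left, [3, 5, 2, 6, 4], width=0.8, color='b',
#          yerr=[0.5, 0.4, 0.6, 0.3, 0.5], ecolor='k')
#   plt.show()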
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
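# Usage sketch (illustrative only): a stem plot of a short sequence; the
# format strings shown are the documented defaults.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   markerline, stemlines, baseline = ax.stem(
#       [1, 2, 3, 4], [2, 4, 1, 3], linefmt='b-', markerfmt='bo', basefmt='r-')
#   plt.show()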
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
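# Usage sketch (illustrative only): a pie chart on a square axes, with one
# wedge exploded and percentage labels; the fractions are arbitrary.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure(figsize=(8, 8))
#   ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#   ax.pie([15, 30, 45, 10], labels=['a', 'b', 'c', 'd'],
#          explode=(0, 0.1, 0, 0), autopct='%1.1f%%', shadow=True)
#   plt.show()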
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
If a scalar number, len(N) array-like object, or an Nx1 array-like
object, errorbars are drawn at +/- value.
If a 2xN array-like (two length-N rows), errorbars are drawn at
-row1 and +row2.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
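# Usage sketch (illustrative only): symmetric y errors plus asymmetric x
# errors given as a 2xN sequence, matching the handling in the code
# above; the data are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   x = np.arange(1, 6)
#   y = x ** 2
#   ax.errorbar(x, y, yerr=2.0, xerr=[[0.2] * 5, [0.4] * 5],
#               fmt='o', ecolor='g', capsize=4)
#   plt.show()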
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the interquartile range: they extend to the
most extreme data point within ``whis*(75%-25%)`` of the box ends.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
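# Usage sketch (illustrative only): side-by-side boxplots of two random
# samples; the notch/sym/vert/whis values are the documented defaults.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   data = [np.random.randn(100), np.random.randn(100) + 1]
#   result = ax.boxplot(data, notch=0, sym='b+', vert=1, whis=1.5)
#   plt.show()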
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3     a circle (*numsides* and *angle* are ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
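# Usage sketch (illustrative only): scattering random points with sizes
# in points^2 and values colormapped from the *c* array, per the
# docstring above; the data are random.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   x, y, c = np.random.rand(3, 100)
#   ax.scatter(x, y, s=40, c=c, marker='o')
#   plt.show()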
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`\log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins!=None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
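# Editor's note -- illustrative usage sketch, not part of the original module.
# It shows a typical hexbin call and how to read the per-hexagon counts back
# from the returned PolyCollection. Assumes ``import numpy as np``,
# ``import matplotlib.pyplot as plt`` and an existing axes, e.g.
# ``fig = plt.figure(); ax = fig.add_subplot(111)``:
#
#     x, y = np.random.standard_normal((2, 10000))
#     coll = ax.hexbin(x, y, gridsize=40, bins='log')
#     counts = coll.get_array()      # one value per hexagon (log10(i+1) here)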
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue::
ax.fill(x, y, 'b')
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`.
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
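# Editor's note -- illustrative usage sketch, not part of the original module.
# Two filled polygons in one call, as described in the docstring above; assumes
# np/plt imports and an existing axes ``ax``:
#
#     t = np.linspace(0, 2 * np.pi, 100)
#     patches = ax.fill(t, np.sin(t), 'g', t, np.cos(t), 'r')
#     # ``patches`` is a list of matplotlib.patches.Polygon instances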
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the y data
*y2*
an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
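# Editor's note -- illustrative usage sketch, not part of the original module.
# Fill only where the condition holds, as described above; assumes np/plt
# imports and an existing axes ``ax``:
#
#     x = np.linspace(0, 2 * np.pi, 200)
#     y = np.sin(x)
#     coll = ax.fill_between(x, y, 0, where=y >= 0)   # shade positive lobes only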
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, defaults to :func:`normalize`. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
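# Editor's note -- illustrative usage sketch, not part of the original module.
# Display a luminance array with an explicit data extent; assumes np/plt
# imports, ``from matplotlib import cm`` and an existing axes ``ax``:
#
#     Z = np.random.rand(20, 30)
#     im = ax.imshow(Z, cmap=cm.gray, origin='lower',
#                    extent=(0.0, 3.0, 0.0, 2.0), interpolation='nearest')
#     im.set_clim(0.0, 1.0)          # same effect as passing vmin/vmax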
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
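# Editor's note -- illustrative usage sketch, not part of the original module.
# It mirrors the grid-orientation discussion in the docstring: C is indexed as
# C[row, col], i.e. C(y, x), so a C built as rand(len(x), len(y)) must be
# transposed. Assumes np/plt imports and an existing axes ``ax``:
#
#     x = np.arange(5)
#     y = np.arange(3)
#     X, Y = np.meshgrid(x, y)
#     C = np.random.rand(len(x), len(y))
#     coll = ax.pcolor(X, Y, C.T)    # note the transpose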
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
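# Editor's note -- illustrative usage sketch, not part of the original module.
# Same call pattern as pcolor but returning a QuadMesh; masked values in C are
# handled through the colormap (its "bad" color). Assumes np/plt imports,
# ``import numpy.ma as ma`` and an existing axes ``ax``:
#
#     X, Y = np.meshgrid(np.arange(6), np.arange(4))
#     C = np.random.rand(3, 5)
#     C = ma.masked_where(C < 0.1, C)
#     mesh = ax.pcolormesh(X, Y, C)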
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
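# Editor's note -- illustrative usage sketch, not part of the original module.
# The fastest path: a uniform rectangular grid given only by its x/y ranges,
# which makes pcolorfast return an AxesImage. Assumes np/plt imports and an
# existing axes ``ax``:
#
#     C = np.random.rand(50, 80)
#     img = ax.pcolorfast([0.0, 8.0], [0.0, 5.0], C)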
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a sharex
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
is an integer input argument=numbins, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError, 'Cannot use provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
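# Editor's note -- illustrative usage sketch, not part of the original module.
# A labelled, overlaid pair of histograms as in the docstring's legend example;
# assumes np/plt imports and an existing axes ``ax``:
#
#     n, bins, patches = ax.hist(10 + 2 * np.random.randn(1000),
#                                bins=30, normed=True, label='men')
#     ax.hist(12 + 3 * np.random.randn(1000), bins=30, normed=True,
#             label='women', alpha=0.5)
#     ax.legend()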
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
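# Editor's note -- illustrative usage sketch, not part of the original module.
# Welch PSD of a noisy tone; ``Fs`` is the sampling frequency of the signal.
# Assumes np/plt imports and an existing axes ``ax``:
#
#     Fs = 1000.0
#     t = np.arange(0.0, 1.0, 1.0 / Fs)
#     x = np.sin(2 * np.pi * 100 * t) + 0.5 * np.random.randn(len(t))
#     Pxx, freqs = ax.psd(x, NFFT=256, Fs=Fs, noverlap=128)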
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
step = 10*int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
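# Editor's note -- illustrative usage sketch, not part of the original module.
# Spectrogram of a chirp-like signal, analogous to the psd() sketch above;
# assumes np/plt imports and an existing axes ``ax``:
#
#     Fs = 1000.0
#     t = np.arange(0.0, 2.0, 1.0 / Fs)
#     x = np.sin(2 * np.pi * (100 + 50 * t) * t)
#     Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=Fs, noverlap=128)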
del psd_doc_dict #So that this does not become an Axes attribute
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
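# Editor's note -- illustrative usage sketch, not part of the original module.
# Image style (the default) versus marker style for the same mostly-zero array;
# assumes np/plt imports and an existing axes ``ax``:
#
#     Z = np.random.rand(40, 40)
#     Z[Z < 0.9] = 0.0                       # keep roughly 10% of the entries
#     ax.spy(Z)                              # image style
#     # ax.spy(Z, marker='.', markersize=4)  # marker style instead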
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
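# Editor's note -- illustrative usage sketch, not part of the original module.
# The factory builds a "<AxesClass>Subplot" type on the fly; with no argument it
# returns the ordinary Axes-based Subplot used by fig.add_subplot(). Assumes an
# existing figure ``fig``:
#
#     SubplotClass = subplot_class_factory()     # -> AxesSubplot
#     ax = SubplotClass(fig, 2, 2, 3)            # third cell of a 2x2 grid
#     ax.get_geometry()                          # -> (2, 2, 3)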
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| gpl-3.0 |
TuKo/brainiak | examples/fcma/classification.py | 1 | 9141 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.classifier import Classifier
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak.io import dataset
from sklearn import svm
#from sklearn.linear_model import LogisticRegression
import sys
import logging
import numpy as np
from scipy.spatial.distance import hamming
from sklearn import model_selection
#from sklearn.externals import joblib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
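    # Note added for clarity: the first subject's epochs are moved to the end of
    # the list, so with num_training_samples below the remaining subjects form
    # the training set and the first subject becomes the held-out test set.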
clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
num_training_samples=num_epochs_per_subj*(num_subjects-1))
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[0:num_epochs_per_subj]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # when the kernel matrix is computed in portions, the test data is already inside the classifier, so pass None to score()
print(clf.score(None, test_labels))
def example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj):
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
for i in range(num_subjects):
leave_start = i * num_epochs_per_subj
leave_end = (i+1) * num_epochs_per_subj
training_data = raw_data[0:leave_start] + raw_data[leave_end:]
test_data = raw_data[leave_start:leave_end]
training_labels = labels[0:leave_start] + labels[leave_end:]
test_labels = labels[leave_start:leave_end]
clf.fit(list(zip(training_data, training_data)), training_labels)
# joblib can be used for saving and loading models
#joblib.dump(clf, 'model/logistic.pkl')
#clf = joblib.load('model/svm.pkl')
predict = clf.predict(list(zip(test_data, test_data)))
print(predict)
print(clf.decision_function(list(zip(test_data, test_data))))
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when leaving subject %d out for testing, the accuracy is %d / %d = %.2f' %
(i, num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
print(clf.score(list(zip(test_data, test_data)), test_labels))
def example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj):
# NOTE: this method does not work for sklearn.svm.SVC with precomputed kernel
# when the kernel matrix is computed in portions; also, this method only works
# for self-correlation, i.e. correlation between the same data matrix.
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_subjects,
shuffle=False)
scores = model_selection.cross_val_score(clf, list(zip(raw_data, raw_data)),
y=labels,
cv=skf)
print(scores)
logger.info(
'the overall cross validation accuracy is %.2f' %
np.mean(scores)
)
def example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj):
    # correlate two different data components (e.g. two masks); the kernel matrix is not aggregated here
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
labels[0:num_training_samples])
X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
predict = clf.predict(X)
print(predict)
print(clf.decision_function(X))
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
        'when correlating two different components, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # report the test accuracy on the held-out subject's epochs
print(clf.score(X, test_labels))
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data, raw_data2)), labels,
num_training_samples=num_training_samples)
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # when the kernel matrix is computed in portions, the test data is already inside the classifier, so pass None to score()
print(clf.score(None, test_labels))
# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
logger.error('the number of input argument is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
epoch_list = np.load(epoch_file)
num_subjects = len(epoch_list)
num_epochs_per_subj = epoch_list[0].shape[1]
images = dataset.load_images_from_dir(data_dir, extension)
mask = dataset.load_boolean_mask(mask_file)
conditions = dataset.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj)
# test of two different components for correlation computation
# images = dataset.load_images_from_dir(data_dir, extension)
# mask2 = dataset.load_boolean_mask('face_scene/visual_top_mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
#example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
#example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
| apache-2.0 |
gmsanchez/nmpc_comparison | cstr_startup_colloc.py | 1 | 9778 | # Linear and nonlinear control of startup of a CSTR.
import mpctools as mpc
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import time
# Define some parameters and then the CSTR model.
Nx = 3
Nu = 2
Nd = 1
# Ny = Nx
Delta = .25
# eps = 1e-6 # Use this as a small number.
T0 = 350
c0 = 1
r = .219
k0 = 7.2e10
E = 8750
U = 54.94
rho = 1000
Cp = .239
dH = -5e4
def ode(x,u,d):
# Grab the states, controls, and disturbance. We would like to write
#
# [c, T, h] = x[0:Nx]
# [Tc, F] = u[0:Nu]
# [F0] = d[0:Nd]
#
# but this doesn't work in Casadi 3.0. So, we're stuck with the following:
c = x[0]
T = x[1]
h = x[2]
Tc = u[0]
F = u[1]
F0 = d[0]
# Now create the ODE.
rate = k0*c*np.exp(-E/T)
dxdt = np.array([
F0*(c0 - c)/(np.pi*r**2*h) - rate,
F0*(T0 - T)/(np.pi*r**2*h)
- dH/(rho*Cp)*rate
+ 2*U/(r*rho*Cp)*(Tc - T),
(F0 - F)/(np.pi*r**2)
])
return dxdt
# Turn into casadi function and simulator.
ode_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="ode")
ode_rk4_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],
funcname="ode_rk4",rk4=False,Delta=Delta)
cstr = mpc.DiscreteSimulator(ode, Delta, [Nx,Nu,Nd], ["x","u","d"])
# Steady-state values.
cs = .878
Ts = 324.5
hs = .659
Fs = .1
Tcs = 300
F0s = .1
# Update the steady-state values a few times to make sure they don't move.
for i in range(10):
[cs,Ts,hs] = cstr.sim([cs,Ts,hs],[Tcs,Fs],[F0s]).tolist()
xs = np.array([cs,Ts,hs])
us = np.array([Tcs,Fs])
ds = np.array([F0s])
# Now get a linearization at this steady state.
#ss = mpc.util.getLinearizedModel(ode_casadi, [xs,us,ds], ["A","B","Bp"], Delta)
#A = ss["A"]
#B = ss["B"]
#Bp = ss["Bp"]
#C = np.eye(Nx)
# Weighting matrices for controller.
Q = .5*np.diag(xs**-2)
R = 2*np.diag(us**-2)
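# The weighting above (a reading of the original choice, not stated in the
# source): dividing by the squared steady-state values makes the penalized
# deviations roughly dimensionless, so concentration, temperature, and level
# errors are weighted on a comparable scale.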
# model_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="cstr")
#[K, Pi] = mpc.util.dlqr(A,B,Q,R)
# Define casadi functions.
Fnonlinear = ode_rk4_casadi
# def measurement(x,d):
# return x
# h = mpc.getCasadiFunc(measurement,[Nx,Nd],["x","d"],funcname="h")
#def linmodel(x,u,d):
# Ax = mpc.mtimes(A,x-xs) + xs
# Bu = mpc.mtimes(B,u-us)
# Bpd = mpc.mtimes(Bp,d-ds)
# return Ax + Bu + Bpd
#Flinear = mpc.getCasadiFunc(linmodel,[Nx,Nu,Nd],["x","u","d"],funcname="F")
def stagecost(x,u,xsp,usp,Q,R):
# Return deviation variables.
dx = x - xsp
du = u - usp
# Calculate stage cost.
return mpc.mtimes(dx.T,Q,dx) + mpc.mtimes(du.T,R,du)
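# Written out, the stage cost above is the standard quadratic penalty
#   l(x, u) = (x - x_sp)' Q (x - x_sp) + (u - u_sp)' R (u - u_sp).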
largs = ["x","u","x_sp","u_sp","Q","R"]
l = mpc.getCasadiFunc(stagecost,[Nx,Nu,Nx,Nu,(Nx,Nx),(Nu,Nu)],largs,
funcname="l")
def costtogo(x,xsp):
# Deviation variables.
dx = x - xsp
# Calculate cost to go.
return mpc.mtimes(dx.T,10*Q,dx)
Pf = mpc.getCasadiFunc(costtogo,[Nx,Nx],["x","s_xp"],funcname="Pf")
# First see what happens if we try to start up the reactor under no control.
Nsim = 100
x0 = np.array([.05*cs,.75*Ts,.5*hs])
xcl = {}
ucl = {}
xcl["uncont"] = np.zeros((Nsim+1,Nx))
xcl["uncont"][0,:] = x0
ucl["uncont"] = np.tile(us,(Nsim,1))
for t in range(Nsim):
xcl["uncont"][t+1,:] = cstr.sim(xcl["uncont"][t,:],ucl["uncont"][t,:],ds)
# Build a solver for the linear and nonlinear models.
Nt = 15
sp = {"x" : np.tile(xs, (Nt+1,1)), "u" : np.tile(us, (Nt,1))}
#xguesslin = np.zeros((Nt+1,Nx))
#xguesslin[0,:] = x0
#for t in range(Nt):
# xguesslin[t+1,:] = A.dot(xguesslin[t,:] - xs) + xs
#guesslin = {"x" : xguesslin, "u" : np.tile(us,(Nt,1))}
guessnonlin = sp.copy()
# Control bounds.
umax = np.array([.05*Tcs,.15*Fs])
dumax = .2*umax # Maximum for rate-of-change.
bounds = dict(uub=[us + umax],ulb=[us - umax])
ub = {"u" : np.tile(us + umax, (Nt,1)), "Du" : np.tile(dumax, (Nt,1))}
lb = {"u" : np.tile(us - umax, (Nt,1)), "Du" : np.tile(-dumax, (Nt,1))}
N = {"x":Nx, "u":Nu, "p":Nd, "t":Nt, "c":3}
p = np.tile(ds, (Nt,1)) # Parameters for system.
nmpc_commonargs = {
"N" : N,
"Delta": Delta,
"x0" : x0,
"lb" : lb,
"ub" : ub,
"p" : p,
"verbosity" : 0,
"Pf" : Pf,
"l" : l,
"sp" : sp,
"uprev" : us,
"funcargs" : {"l" : largs},
"extrapar" : {"Q" : Q, "R" : R}, # In case we want to tune online.
}
solvers = {}
# solvers["lmpc"] = mpc.nmpc(f=Flinear,guess=guesslin,**nmpc_commonargs)
solvers["nmpc"] = mpc.nmpc(f=Fnonlinear,guess=guessnonlin,**nmpc_commonargs)
# Also build steady-state target finders.
contVars = [0,2]
#sstarg_commonargs = {
# "N" : N,
# "lb" : {"u" : np.tile(us - umax, (1,1))},
# "ub" : {"u" : np.tile(us + umax, (1,1))},
# "verbosity" : 0,
## "h" : h,
# "p" : np.array([ds]),
#}
#sstargs = {}
# sstargs["lmpc"] = mpc.sstarg(f=Flinear,**sstarg_commonargs)
# sstargs["nmpc"] = mpc.sstarg(f=Fnonlinear,**sstarg_commonargs)
# Now simulate the process under control.
tcl = {}
for method in solvers.keys():
xcl[method] = np.zeros((Nsim+1,Nx))
xcl[method][0,:] = x0
tcl[method] = np.zeros((Nsim+1,1))
thisx = x0
ucl[method] = np.zeros((Nsim,Nu))
# ysp = np.tile(xs,(Nsim+1,1))
xsp = np.zeros((Nsim+1,Nx))
usp = np.zeros((Nsim,Nu))
# ysp[int(Nsim/3):int(2*Nsim/3),:] = xs*np.array([.85,.75,1.15])
for t in range(Nsim):
# Figure out setpoints.
# if t == 0 or not np.all(ysp[t,:] == ysp[t-1,:]):
# thisysp = ysp[t,:]
# sstargs[method].fixvar("y",0,thisysp[contVars],contVars)
# sstargs[method].guess["u",0] = us
# sstargs[method].guess["x",0] = thisysp
# sstargs[method].guess["y",0] = thisysp
# sstargs[method].solve()
#
# print "%10s %3d: %s" % ("sstarg",t,sstargs[method].stats["status"])
# if sstargs[method].stats["status"] != "Solve_Succeeded":
# print "***Target finder failed!"
# break
#
# xsp[t,:] = np.squeeze(sstargs[method].var["x",0])
# usp[t,:] = np.squeeze(sstargs[method].var["u",0])
#
# solvers[method].par["x_sp"] = [xsp[t,:]]*(Nt + 1)
# solvers[method].par["u_sp"] = [usp[t,:]]*Nt
# Fix initial condition and solve.
t0 = time.time()
solvers[method].fixvar("x",0,thisx)
solvers[method].solve()
print "%10s %3d: %s" % (method,t,solvers[method].stats["status"])
if solvers[method].stats["status"] != "Solve_Succeeded":
print "***Solver failed!"
break
else:
solvers[method].saveguess()
thisu = np.squeeze(solvers[method].var["u"][0])
ucl[method][t,:] = thisu
t1 = time.time()
tcl[method][t] = t1-t0
thisx = cstr.sim(thisx,thisu,ds)
xcl[method][t+1,:] = thisx
# Update previous u.
solvers[method].par["u_prev",0] = ucl[method][t,:]
# Define plotting function.
def cstrplot(x,u,xsp=None,contVars=[],title=None,colors={},labels={},
markers={},keys=None,bounds=None,ilegend=0):
if keys is None:
keys = x.keys()
for k in keys:
u[k] = np.concatenate((u[k],u[k][-1:,:]))
ylabelsx = ["$c$ (mol/L)", "$T$ (K)", "$h$ (m)"]
ylabelsu = ["$T_c$ (K)", "$F$ (kL/min)"]
gs = gridspec.GridSpec(Nx*Nu,2)
fig = plt.figure(figsize=(10,6),facecolor="none")
leglines = []
leglabels = []
for i in range(Nx):
ax = fig.add_subplot(gs[i*Nu:(i+1)*Nu,0])
for k in keys:
t = np.arange(0,x[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k),
"marker":markers.get(k,"")}
[line] = ax.plot(t,x[k][:,i],markeredgecolor="none",**args)
if i == ilegend:
leglines.append(line)
leglabels.append(args["label"])
if i in contVars and xsp is not None:
ax.step(t,xsp[:,i],linestyle="--",color="black",where="post")
ax.set_ylabel(ylabelsx[i])
mpc.plots.zoomaxis(ax,yscale=1.1)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
for i in range(Nu):
ax = fig.add_subplot(gs[i*Nx:(i+1)*Nx,1])
for k in keys:
t = np.arange(0,u[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k)}
ax.step(t,u[k][:,i],where="post",**args)
if bounds is not None:
for b in set(["uub", "ulb"]).intersection(bounds.keys()):
ax.plot(np.array([t[0],t[-1]]),np.ones((2,))*bounds[b][i],
'--k')
ax.set_ylabel(ylabelsu[i])
mpc.plots.zoomaxis(ax,yscale=1.25)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
fig.legend(leglines,leglabels,loc="lower center",ncol=len(keys))
fig.tight_layout(pad=.5,rect=(0,.075,1,1))
if title is not None:
fig.canvas.set_window_title(title)
return fig
x = xcl['nmpc']
u = ucl['nmpc']
ptimes = tcl['nmpc']
# Make plots.
keys = ["uncont", "nmpc"]
colors = {"lmpc":"blue", "nmpc":"green", "uncont":"red"}
labels = {"lmpc":"LMPC", "nmpc":"NMPC", "uncont":"Uncontrolled"}
markers = {"lmpc":"s", "nmpc":"o", "uncont":"^"}
plotbounds = dict([(k,bounds[k][0]) for k in ["ulb","uub"]])
fig = cstrplot(xcl, ucl, colors=colors, contVars=contVars, labels=labels,
keys=keys, markers={}, bounds=plotbounds, ilegend=2)
fig.show()
# mpc.plots.showandsave(fig,"cstr_startup.pdf",facecolor="none")
| gpl-3.0 |
fzalkow/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
jomivega/ASE4156 | tests/test_stocks.py | 1 | 3024 | """This module is for testing stocks"""
from unittest import mock
from django.test import TestCase
from stocks.models import Stock, DailyStockQuote
import pandas as pd
from yahoo_historical import Fetcher
from authentication.plaid_middleware import PlaidMiddleware
import pytest
class StocksViewTests(TestCase):
"""
Testing Stocks Model
"""
@classmethod
def setup_class(cls):
"""Setting up testing"""
cls._original_init_method = Fetcher.__init__
Fetcher.__init__ = mock.Mock(return_value=None)
PlaidMiddleware.__call__ = lambda self, request: self.get_response(request)
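        # Added note: mocking Fetcher.__init__ (and getHistorical per test) keeps
        # the tests from calling the real Yahoo API, and replacing
        # PlaidMiddleware.__call__ bypasses Plaid authentication; this reading of
        # the intent is inferred, not stated in the original.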
@classmethod
def teardown_class(cls):
"""Teardown testing"""
Fetcher.__init__ = cls._original_init_method
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(return_value=pd.DataFrame({
'Close': [1.5, 2.5],
'Date': ["2017-05-05", "2017-05-06"],
}))
)
@pytest.mark.django_db(transaction=True)
def test_api_for_real_stock(self):
"""
Testing adding stock via endpoint, asserting stock is inserted
"""
ticker = "googl"
name = "Google"
data = {'name': name, 'ticker': ticker}
request = self.client.post('/stocks/addstock/', data)
self.assertEqual(request.status_code, 200)
data = Stock.objects.all()
self.assertEqual(len(data), 1)
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(side_effect=KeyError('abc'))
)
def test_api_for_invalid_ticker(self):
"""
Testing adding stock via endpoint, asserting stock is inserted but no
data added to DailyStockQuote since ticker is invalid
"""
ticker = "xxx"
name = "Julian"
data = {'name': name, 'ticker': ticker}
request = self.client.post('/stocks/addstock/', data)
self.assertEqual(request.status_code, 500)
data = DailyStockQuote.objects.all()
self.assertEqual(len(data), 0)
def test_api_with_invalid_call(self):
"""
Endpoint only works with POST
"""
request = self.client.get('/stocks/addstock/')
self.assertEqual(request.status_code, 405)
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(return_value=pd.DataFrame({
'Close': [1.5, 2.5],
'Date': ["2017-05-05", "2017-05-06"],
}))
)
@pytest.mark.django_db(transaction=True)
def test_fill_quote_history(self):
"""
Filling data for Stock
"""
ticker = "ibm"
name = "IBM"
data = {'name': name, 'ticker': ticker}
request = self.client.get('/stocks/addstock/', data)
stock_id = request.content
data = DailyStockQuote.objects.filter(stock_id=stock_id)
stock_data = Stock.objects.filter(id=stock_id)
self.assertGreater(len(data), 0)
self.assertEqual(len(stock_data), 1)
| apache-2.0 |
xyguo/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and
then various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
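# At this point each row of ``data`` is one flattened 7x7 patch taken from the
# clean left half of the image, centered and scaled per pixel position, so the
# dictionary below is learned on standardized patches.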
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
KellyChan/Python | python/data_science/NYC/analysis3_predictions.py | 3 | 3515 | import numpy as np
import pandas
def normalize_features(array):
"""
Normalize the features in our data set.
"""
array_normalized = (array-array.mean())/array.std()
mu = array.mean()
sigma = array.std()
return array_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values, and the values for our thetas.
This should be the same code as the compute_cost function in the lesson #3 exercises. But
feel free to implement your own.
"""
# your code here
m = len(values)
sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()
cost = sum_of_square_errors / (2*m)
return cost
def gradient_descent(features, values, theta, alpha, num_iterations):
"""
Perform gradient descent given a data set with an arbitrary number of features.
This is the same gradient descent code as in the lesson #3 exercises. But feel free
to implement your own.
"""
m = len(values)
cost_history = []
for i in range(num_iterations):
# your code here
# Calculate cost
cost = compute_cost(features, values, theta)
# Append cost to history
cost_history.append(cost)
# Calculate new theta
theta = theta + (alpha / m) * np.dot((values - np.dot(features,theta)),features)
return theta, pandas.Series(cost_history)
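# Added note: the update inside the loop above is the vectorized batch
# gradient-descent step
#   theta <- theta + (alpha / m) * X^T (y - X theta),
# which decreases the cost J(theta) = ||X theta - y||^2 / (2 m) for a
# sufficiently small learning rate alpha.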
def predictions(dataframe):
'''
The NYC turnstile data is stored in a pandas dataframe called weather_turnstile.
    Using the information stored in the dataframe, let's predict the ridership of
the NYC subway using linear regression with gradient descent.
You can look at information contained in the turnstile weather dataframe
at the link below:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Your prediction should have a R^2 value of .40 or better.
Note: due to the memory and CPU limitation of our amazon EC2 instance, we will
    give you a random subset (~15%) of the data contained in turnstile_data_master_with_weather.csv.
If you receive a "server has encountered an error" message, that means you are hitting
the 30 second limit that's placed on running your program. Try using a smaller number
for num_iterations if that's the case.
    Or if you are using your own algorithms/models, see if you can optimize your code so it
runs faster.
'''
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']].join(dummy_units)
values = dataframe[['ENTRIESn_hourly']]
m = len(values)
features, mu, sigma = normalize_features(features)
features['ones'] = np.ones(m)
features_array = np.array(features)
values_array = np.array(values).flatten()
#Set values for alpha, number of iterations.
alpha = 0.1 # please feel free to play with this value
num_iterations = 75 # please feel free to play with this value
#Initialize theta, perform gradient descent
theta_gradient_descent = np.zeros(len(features.columns))
theta_gradient_descent, cost_history = gradient_descent(features_array, values_array, theta_gradient_descent,
alpha, num_iterations)
#print cost_history
prediction = np.dot(features_array, theta_gradient_descent)
return prediction | mit |
hstau/manifold-cryo | fit_1D_open_manifold_3D.py | 1 | 5015 | import numpy as np
import get_fit_1D_open_manifold_3D_param
import solve_d_R_d_tau_p_3D
import a
from scipy.io import loadmat
import matplotlib.pyplot as plt
#import matplotlib.pyplot as plt
'''
function [a,b,tau] = fit_1D_open_manifold_3D(psi)
%
% fit_1D_open_manifold_3D
%
% fit the eigenvectors for a 1D open manifold to the model
% x_ij = a_j cos(j*pi*tau_i) + b_j.
%
% j goes from 1 to 3 (this is only for 3D systems).
%
% i goes from 1 to nS where nS is the number of data points to be fitted.
%
% For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are
% obtained by putting dR/d(tau_i) to zero.
%
% For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are
% obtained by solving 3 sets of 2x2 linear equations.
%
% Fit parameters and initial set of {\tau} are specified in
%
% get_fit_1D_open_manifold_3D_param.m
%
% copyright (c) Russell Fung 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global p nDim a b x x_fit
'''
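# Added summary of the fit performed by op() below: the model is
#     x_ij = a_j * cos(j * pi * tau_i) + b_j,   j = 1..3, i = 1..nS,
# and each iteration alternates between (1) solving a 2x2 linear system per
# component j for (a_j, b_j) with the tau_i held fixed, and (2) re-estimating
# each tau_i with (a, b) held fixed via solve_d_R_d_tau_p_3D.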
'''
def plot_fitted_curve(hFig):
global x x_fit
h = plt.figure(hFig)
hsp = plt.subplot(2,2,1)
plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1);
hold on
plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1);
hold off
set(hsp,'lineWidth',2,'fontSize',15);
hsp = subplot(2,2,2);
plotRF(hsp,x(:,1),x(:,2),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.');
hsp = subplot(2,2,3);
plotRF(hsp,x(:,1),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.');
hsp = subplot(2,2,4);
plotRF(hsp,x(:,2),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.');
drawnow
%end
'''
eps = 1e-4
#global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result
def op(psi):
a.init()
#global p, nDim, a, b, x, x_fit
a.nDim = 3
#tau = get_fit_1D_open_manifold_3D_param
tau = get_fit_1D_open_manifold_3D_param.op(psi)
aux = np.zeros((tau.shape[0],5)) #added
nS = a.x.shape[0]
for iter in xrange(1,a.maxIter+1):
string ='iteration ' + str(iter)
print string
'''
#%%%%%%%%%%%%%%%%%%%%%
#% solve for a and b %
#%%%%%%%%%%%%%%%%%%%%%
'''
a_old = a.a
b_old = a.b
j_pi_tau = np.dot(tau,np.pi*np.array([[1,2,3]]))
cos_j_pi_tau = np.cos(j_pi_tau)
A11 = np.sum(cos_j_pi_tau**2, axis=0)
A12 = np.sum(cos_j_pi_tau, axis=0)
A21 = A12
A22 = nS
x_cos_j_pi_tau = a.x*cos_j_pi_tau
b1 = np.sum(x_cos_j_pi_tau, axis=0)
b2 = np.sum(a.x, axis=0)
coeff = np.zeros((2,3))
for qq in xrange(3):
A = np.array([[A11[qq],A12[qq]],[A21[qq], A22]])
b = np.array([b1[qq], b2[qq]])
coeff[:,qq] = np.linalg.solve(A,b)
a.a = coeff[0,:]
a.b = coeff[1,:]
'''
%%%%%%%%%%%%%%%%%%%%%%%%%
#% plot the fitted curve %
%%%%%%%%%%%%%%%%%%%%%%%%%
'''
j_pi_tau = np.dot(np.linspace(0,1,1000).reshape(-1,1),np.array([[1,2,3]]))*np.pi
cos_j_pi_tau = np.cos(j_pi_tau)
tmp = a.a*cos_j_pi_tau
a.x_fit = tmp + a.b
#%plot_fitted_curve(iter)
'''
%%%%%%%%%%%%%%%%%
#% solve for tau %
%%%%%%%%%%%%%%%%%
'''
tau_old = tau
for a.p in xrange(nS):
tau[a.p],beta = solve_d_R_d_tau_p_3D.op() #added
for kk in xrange(beta.shape[0]):
aux[a.p,kk] = beta[kk]
'''
if iter == 0:
data = loadmat('aux0.mat') # (this is for < v7.3
elif iter == 1:
data = loadmat('aux1.mat') # (this is for < v7.3
else:
data = loadmat('aux2.mat') # (this is for < v7.3
imaux = data['aux']
plt.subplot(2, 2, 1)
plt.imshow(aux, cmap=plt.get_cmap('gray'),aspect=0.1)
plt.title('aux')
plt.subplot(2, 2, 2)
plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1)
plt.title('imaux')
plt.show()
'''
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% calculate the changes in fitting parameters %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
delta_a = np.fabs(a.a-a_old)/(np.fabs(a.a)+eps)
delta_b = np.fabs(a.b-b_old)/(np.fabs(a.b)+eps)
delta_tau = np.fabs(tau-tau_old)
delta_a = max(delta_a)*100
delta_b = max(delta_b)*100
delta_tau = max(delta_tau)
print ' changes in fitting parameters: \n'
string = ' amplitudes: '+ str(delta_a) + '\n' + \
' offsets: ' + str(delta_b) + ' \n' +\
' values of tau: ' + str(delta_tau) + ' \n'
print string
if (delta_a<a.delta_a_max) and (delta_b < a.delta_b_max) and (delta_tau < a.delta_tau_max):
break
return (a.a,a.b,tau)
| gpl-2.0 |
jmcarp/osf.io | scripts/analytics/addons.py | 21 | 2200 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
        for record in collection.find(query, {_id: True})
]
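# Added note: oid_to_datetime presumably recovers the creation timestamp that
# MongoDB embeds in each ObjectId, so the list above approximates when each
# record was created.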
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
init_app(routes=False)
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
mihaic/brainiak | examples/fcma/mvpa_voxel_selection.py | 2 | 3996 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector
from brainiak.fcma.preprocessing import prepare_searchlight_mvpa_data
from brainiak import io
from sklearn import svm
import sys
from mpi4py import MPI
import logging
import nibabel as nib
import numpy as np
from brainiak.searchlight.searchlight import Searchlight
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
"""
example running command in run_mvpa_voxel_selection.sh
"""
if __name__ == '__main__':
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
            'program starts in %d process(es)' %
MPI.COMM_WORLD.Get_size()
)
if len(sys.argv) != 6:
logger.error('the number of input argument is not correct')
sys.exit(1)
data_dir = sys.argv[1]
suffix = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
# all MPI processes read the mask; the mask file is small
mask_image = nib.load(mask_file)
mask = io.load_boolean_mask(mask_file)
data = None
labels = None
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
'mask size: %d' %
np.sum(mask)
)
images = io.load_images_from_dir(data_dir, suffix=suffix)
conditions = io.load_labels(epoch_file)
data, labels = prepare_searchlight_mvpa_data(images, conditions)
# setting the random argument produces random voxel selection results
# for non-parametric statistical analysis.
# There are three random options:
# RandomType.NORANDOM is the default
# RandomType.REPRODUCIBLE permutes the voxels in the same way every run
# RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
# example
#from brainiak.fcma.preprocessing import RandomType
#data, labels = prepare_searchlight_mvpa_data(images, conditions,
# random=RandomType.UNREPRODUCIBLE)
# the following line is an example to leaving a subject out
#epoch_info = [x for x in epoch_info if x[1] != 0]
num_subjs = int(sys.argv[5])
# create a Searchlight object
sl = Searchlight(sl_rad=1)
mvs = MVPAVoxelSelector(data, mask, labels, num_subjs, sl)
clf = svm.SVC(kernel='linear', shrinking=False, C=1, gamma='auto')
# only rank 0 has meaningful return values
score_volume, results = mvs.run(clf)
# this output is just for result checking
if MPI.COMM_WORLD.Get_rank()==0:
score_volume = np.nan_to_num(score_volume.astype(np.float))
io.save_as_nifti_file(score_volume, mask_image.affine,
'result_score.nii.gz')
seq_volume = np.zeros(mask.shape, dtype=np.int)
seq = np.zeros(len(results), dtype=np.int)
with open('result_list.txt', 'w') as fp:
for idx, tuple in enumerate(results):
fp.write(str(tuple[0]) + ' ' + str(tuple[1]) + '\n')
seq[tuple[0]] = idx
seq_volume[mask] = seq
io.save_as_nifti_file(seq_volume, mask_image.affine,
'result_seq.nii.gz')
| apache-2.0 |
bibarz/bibarz.github.io | dabble/ab/auth_algorithms.py | 1 | 17145 | # Import any required libraries or modules.
import numpy as np
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import csv
import sys
class MetaParams:
n_lda_ensemble = 101
lda_ensemble_feature_fraction = 0.4
mode = 'lda_ensemble'
# The following is a hacky container for Statistics computed from the
# whole training set; we don't want to have to recompute them again at every call
# to build_template (it becomes slow for parameter searches with cross validation),
# so we preserve it here between calls. The proper place to
# do this would be in main.py, but we don't want to touch that.
Global = lambda: None
Global.ready = False
def pca_converter(data, feature_discriminabilities, explained_variance):
'''
PCA conversion of the data. The PCA is based on the complete dataset, but each feature
is normalized to a std dev proportional to the given discriminability.
:param data: n_samples x n_features matrix with all data to do PCA on
:param feature_discriminabilities: n_features length vector
:param explained_variance: ratio of explained variance (between 0 and 1) that will
determine how many components are kept
:return: function transforming data into pca components, and covariance matrix
of transformed data
'''
mu = np.mean(data, axis=0)
std = np.std(data, axis=0) / feature_discriminabilities
normalized_data = (data - mu) / std
u, s, vt = np.linalg.svd(normalized_data)
cut_idx = np.argmin(np.abs(np.cumsum(s * s) / np.sum(s * s) - explained_variance))
vt = vt[:cut_idx + 1]
return (lambda x, mu=mu, std=std, vt=vt: np.dot((x - mu) / std, vt.T)),\
np.diag(s[:cut_idx + 1] ** 2 / (len(data) - 1))
def preprocess_data(data):
'''
Turn raw data into an array of hand-picked features useful for classification
:param data: n_samples x n_raw_features numpy array
:return: n_samples x n_processed_features array
'''
keypress_dt = data[:, 8::10] - data[:, 3::10] # duration of each keystroke
key_to_key_dt = data[:, 13::10] - data[:, 3:-10:10] # interval between keystrokes
x_down = data[:, 4::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_down = data[:, 5::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
x_up = data[:, 9::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_up = data[:, 10::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
size_down = data[:, 6::10]
size_up = data[:, 11::10]
pressure_down = data[:, 7::10]
pressure_up = data[:, 12::10]
assert np.all((x_down >= 0) & (x_down <= 1) & (y_down >= 0) & (y_down <= 1))
assert np.all((x_up >= 0) & (x_up <= 1) & (y_up >= 0) & (y_up <= 1))
touch_d = np.hypot(x_down - x_up, y_down - y_up)
collected_data = np.hstack((keypress_dt, key_to_key_dt,
np.diff(x_down, axis=1), np.diff(y_down, axis=1),
touch_d,
size_down, size_up, pressure_down, pressure_up,
))
return collected_data
def get_random_feature_selector(n_all_features, feature_fraction, seed):
'''
Return a selector of random features from a data array
:param n_all_features: total number of features
:param feature_fraction: desired fraction of selected features
:param seed: random seed for repeatable experiments
:return: a function taking in full data and returning only the random features from it
'''
n_features = int(np.round(feature_fraction * n_all_features))
rng = np.random.RandomState(seed)
p = rng.permutation(n_all_features)[:n_features]
return lambda x, p=p: x[..., p]
def simple_gaussian(user_pca):
# template will consist of mean and std dev of each feature in pca space
mean_pca = np.mean(user_pca, axis=0)
std_pca = np.std(user_pca, axis=0)
return mean_pca, std_pca
def scikit_classifier(user, training_dataset, generator=lambda:KNeighborsClassifier(5)):
'''
Train a given classifier on user vs others
:param generator: a function creating a scikit classifier with fit and predict functions
:return: the trained classifier
'''
all_users = training_dataset.keys()
others_raw = np.vstack([training_dataset[u] for u in all_users if u != user])
others_pca = Global.pca(preprocess_data(others_raw))
user_raw = training_dataset[user]
user_pca = Global.pca(preprocess_data(user_raw))
clf = generator()
clf.fit(np.vstack((user_pca, others_pca)),
np.hstack((np.zeros(len(user_pca)), np.ones(len(others_pca)))))
return clf
def lda(user_pca, all_pca_cov, n_all):
'''
Compute the Fisher discriminant vector and threshold to classify user vs others.
:param user_pca: n_samples x n_pca_features array of user instances
:param all_pca_cov: covariance matrix of the complete dataset; it is assumed that
the user data was part of the dataset, and that the mean of the whole dataset
is 0 for every feature
:param n_all: number of samples that formed the complete dataset
:return: Fisher discriminant vector, threshold
'''
n_user = len(user_pca)
assert n_user < n_all - 1 # make sure the complete dataset has more than just the current user
# We compute mean and variance for the user data directly, and infer the mean
# and variance of the rest of the dataset from the covariance of the complete set
# (and its mean, which is assumed zero)
user_mu = np.mean(user_pca, axis=0)
others_mu = - n_user * user_mu / (n_all - n_user)
user_sigma = np.cov(user_pca.T)
def sq_(x):
return x[:, None] * x[None, :]
others_sigma = ((n_all - 1) * all_pca_cov - (n_user - 1) * user_sigma\
- n_user * sq_(user_mu) - (n_all - n_user) * sq_(others_mu)) / (n_all - n_user - 1)
ld_vector = np.dot(np.linalg.inv(user_sigma + others_sigma), user_mu - others_mu) # order determines sign of criterion
ld_vector /= np.linalg.norm(ld_vector)
# find the threshold for equal false positives and false negatives
user_proj_mu = np.dot(user_mu, ld_vector)
others_proj_mu = np.dot(others_mu, ld_vector)
user_proj_std = np.sqrt(np.dot(ld_vector, np.dot(user_sigma, ld_vector)))
others_proj_std = np.sqrt(np.dot(ld_vector, np.dot(others_sigma, ld_vector)))
ld_threshold = (others_proj_std * user_proj_mu + user_proj_std * others_proj_mu) / (user_proj_std + others_proj_std)
return ld_vector, ld_threshold
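# Added note: the direction computed above is the classical Fisher discriminant,
#   w proportional to inv(S_user + S_others) (mu_user - mu_others),
# and ld_threshold is the point along w where the projected user and "others"
# distributions have equal z-scores, i.e. (under the Gaussian assumption)
# roughly equal false-accept and false-reject rates.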
def compute_feature_discriminabilities(each_preprocessed):
'''
Return a vector of discriminability for each feature
:param each_preprocessed: list with one n_samples x n_features data matrix for each user
:return: vector of discriminabilities (sqrt of the square of the difference of means divided by
the sum of variances) for each feature
'''
n_users = len(each_preprocessed)
each_mu = np.array([np.mean(m, axis=0) for m in each_preprocessed]) # n_users x n_features
each_var = np.array([np.var(m, axis=0) for m in each_preprocessed]) # n_users x n_features
# compute discriminability for each feature and pair of users
    pairwise_discriminability = (each_mu[:, None, :] - each_mu[None, :, :]) ** 2 / (1e-6 + each_var[:, None, :] + each_var[None, :, :])
# compute discriminability of each feature as the average over pairs of users
return np.sqrt(np.sum(pairwise_discriminability, axis=(0, 1)) / (n_users * (n_users - 1)))
def _prepare_global(training_dataset):
'''
Processing of the complete dataset, to be reused for each user
- feature preprocessing
- pca converter
- selection of features and computation of covariances for ensemble lda
:param training_dataset: the complete dataset
:return: None. The Global container is initialized with all necessary data
'''
each_preprocessed = [preprocess_data(training_dataset[u]) for u in training_dataset]
Global.feature_discriminabilities = compute_feature_discriminabilities(each_preprocessed)
all_preprocessed = np.vstack(each_preprocessed)
Global.n_all = len(all_preprocessed)
Global.pca, Global.all_pca_cov = pca_converter(all_preprocessed, Global.feature_discriminabilities, explained_variance=0.98)
if MetaParams.mode == 'lda_ensemble':
Global.lda_ensemble = []
for i in range(MetaParams.n_lda_ensemble):
seed = np.random.randint(200000)
feature_selector = get_random_feature_selector(all_preprocessed.shape[1],
feature_fraction=MetaParams.lda_ensemble_feature_fraction, seed=seed)
selected_pca, selected_pca_cov = pca_converter(feature_selector(all_preprocessed),
feature_selector(Global.feature_discriminabilities),
explained_variance=0.99)
Global.lda_ensemble.append({'selector': feature_selector, 'pca': selected_pca, 'pca_cov': selected_pca_cov})
Global.ready = True
# Implement template building here. Feel free to write any helper classes or functions required.
# Return the generated template for that user.
def build_template(user, training_dataset):
if not Global.ready:
_prepare_global(training_dataset)
user_raw = training_dataset[user]
user_preprocessed = preprocess_data(user_raw)
template = {}
if MetaParams.mode in ['lda', 'simple', 'combined']:
user_pca = Global.pca(user_preprocessed)
template['mean_pca'], template['std_pca'] = simple_gaussian(user_pca)
template['ld_vector'], template['ld_threshold'] =\
lda(user_pca, all_pca_cov=Global.all_pca_cov, n_all=Global.n_all)
if MetaParams.mode == 'lda_ensemble':
lda_ensemble = []
for lda_item in Global.lda_ensemble:
user_selected_pca = lda_item['pca'](lda_item['selector'](user_preprocessed))
ld_vector, ld_threshold = lda(user_selected_pca, n_all=Global.n_all, all_pca_cov=lda_item['pca_cov'])
lda_ensemble.append({'ld_vector': ld_vector, 'ld_threshold': ld_threshold})
template['lda_ensemble'] = lda_ensemble
if MetaParams.mode in ['nonlinear', 'combined']:
template['clf_1'] = scikit_classifier(user, training_dataset, generator=lambda: KNeighborsClassifier(5))
template['clf_2'] = scikit_classifier(user, training_dataset, generator=lambda: svm.LinearSVC(C=0.05, class_weight='balanced'))
return template
# Implement authentication method here. Feel free to write any helper classes or functions required.
# Return the authentication score and threshold above which you consider it being a correct user.
def authenticate(instance, user, templates):
mode = MetaParams.mode
assert mode in ['lda', 'combined', 'lda_ensemble', 'nonlinear', 'simple'], ("Unrecognized mode: %s" % mode)
t = templates[user]
batch_mode = instance.ndim > 1
if not batch_mode:
instance = instance[None, :]
preprocessed_instance = preprocess_data(instance)
if mode in ['lda', 'combined']:
user_pca = Global.pca(preprocessed_instance)
user_lda_proj = np.dot(user_pca, t['ld_vector'])
lda_score, lda_thr = user_lda_proj - t['ld_threshold'], np.zeros(len(user_lda_proj))
if mode in ['nonlinear', 'combined']:
user_pca = Global.pca(preprocessed_instance)
clf_score_1, clf_thr_1 = (t['clf_1'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
clf_score_2, clf_thr_2 = (t['clf_2'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
if mode == 'simple':
user_pca = Global.pca(preprocessed_instance)
z = (user_pca - t['mean_pca']) / t['std_pca']
distance = np.mean(np.abs(z) ** 2, axis=1) ** 0.5
        # a larger score must mean "more likely the genuine user" (the caller
        # authenticates when score > threshold), so negate the z-score distance
        score, thr = -distance, -1.2 * np.ones(len(distance))
if mode == 'lda_ensemble':
ensemble_scores = np.empty((len(preprocessed_instance), len(t['lda_ensemble'])))
for i, sub_t in enumerate(t['lda_ensemble']):
g_item = Global.lda_ensemble[i]
user_selected_pca = g_item['pca'](g_item['selector'](preprocessed_instance))
user_thinned_lda_proj = np.dot(user_selected_pca, sub_t['ld_vector'])
ensemble_scores[:, i] = user_thinned_lda_proj - sub_t['ld_threshold']
score = np.mean(ensemble_scores > 0, axis=1)
thr = 0.5 * np.ones(len(score))
if mode == 'lda':
score, thr = lda_score, lda_thr
elif mode == 'nonlinear':
score, thr = clf_score_1, clf_thr_1
elif mode == 'combined':
score = np.mean(np.vstack((lda_score > lda_thr, clf_score_1 > clf_thr_1, clf_score_2 > clf_thr_2)), axis=0)
thr = 0.5 * np.ones(len(score))
if not batch_mode:
assert score.shape == (1, )
assert thr.shape == (1, )
score, thr = score[0], thr[0]
return score, thr
def cross_validate(full_dataset, print_results=False):
'''
n-fold cross-validation of given dataset
:param full_dataset: dictionary of raw data for each user
:param print_results: if True, print progress messages and results
:return: (percentage of false rejects, percentage of false accepts)
'''
n_folds = 5 # for cross-validation
all_false_accept = 0
all_false_reject = 0
all_true_accept = 0
all_true_reject = 0
for i in range(n_folds):
# split full dataset into training and validation
training_dataset = dict()
validation_dataset = dict()
for u in full_dataset.keys():
n = len(full_dataset[u])
idx = np.round(float(n) / n_folds * np.arange(n_folds + 1)).astype(np.int)
n_validation = np.diff(idx)
rolled_set = np.roll(full_dataset[u], -idx[i], axis=0)
training_dataset[u] = rolled_set[n_validation[i]:, :]
validation_dataset[u] = rolled_set[:n_validation[i], :]
# reset global data
Global.ready = False
templates = {u: build_template(u, training_dataset) for u in training_dataset}
# For each user test authentication.
true_accept = 0
false_reject = 0
true_reject = 0
false_accept = 0
for u in training_dataset:
# Test false rejections.
(score, threshold) = authenticate(validation_dataset[u], u, templates)
true_accept += np.sum(score > threshold)
false_reject += np.sum(score <= threshold)
# Test false acceptance.
for u_attacker in validation_dataset:
if u == u_attacker:
continue
(score, threshold) = authenticate(validation_dataset[u_attacker], u, templates)
false_accept += np.sum(score > threshold)
true_reject += np.sum(score <= threshold)
if print_results:
print "fold %i: false reject rate: %.1f%%, false accept rate: %.1f%%" %\
(i, 100. * float(false_reject) / (false_reject + true_accept),
100. * float(false_accept) / (false_accept + true_reject))
all_false_accept += false_accept
all_false_reject += false_reject
all_true_accept += true_accept
all_true_reject += true_reject
false_reject_percent = 100. * float(all_false_reject) / (all_false_reject + all_true_accept)
false_accept_percent = 100. * float(all_false_accept) / (all_false_accept + all_true_reject)
if print_results:
print "Total: false reject rate: %.1f%%, false accept rate: %.1f%%" % (false_reject_percent, false_accept_percent)
return false_reject_percent, false_accept_percent
if __name__ == "__main__":
# Reading the data into the training dataset separated by user.
data_training_file = open('dataset_training.csv', 'rb')
csv_training_reader = csv.reader(data_training_file, delimiter=',', quotechar='"')
csv_training_reader.next()
full_dataset = dict()
for row in csv_training_reader:
if row[0] not in full_dataset:
full_dataset[row[0]] = np.array([]).reshape((0, len(row[1:])))
full_dataset[row[0]] = np.vstack([full_dataset[row[0]], np.array(row[1:]).astype(float)])
for feature_fraction in [0.4]:
for n_lda_ensemble in [51]:
n_trials = 10
tot_rej = 0
tot_acc = 0
for _ in range(n_trials):
MetaParams.feature_fraction = feature_fraction
MetaParams.n_lda_ensemble = n_lda_ensemble
rej, acc = cross_validate(full_dataset)
tot_rej += rej
tot_acc += acc
print "feature fraction=%.2f, ensemble size=%i, false_rej=%.2f%%, false_acc=%.2f%%" % (feature_fraction, n_lda_ensemble, tot_rej / n_trials, tot_acc / n_trials)
| mit |
wanglei828/apollo | modules/tools/plot_planning/speed_dsteering_data.py | 1 | 3396 | #!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
from record_reader import RecordItemReader
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.canbus.proto import chassis_pb2
class SpeedDsteeringData:
def __init__(self):
self.last_steering_percentage = None
self.last_speed_mps = None
self.last_timestamp_sec = None
self.speed_data = []
self.d_steering_data = []
def add(self, chassis):
steering_percentage = chassis.steering_percentage
speed_mps = chassis.speed_mps
timestamp_sec = chassis.header.timestamp_sec
if self.last_timestamp_sec is None:
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
return
if (timestamp_sec - self.last_timestamp_sec) > 0.02:
d_steering = (steering_percentage - self.last_steering_percentage) \
/ (timestamp_sec - self.last_timestamp_sec)
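            # d_steering is the change in steering percentage per second,
            # computed only when more than 20 ms separate the two chassis
            # messages (descriptive comment added for clarity).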
self.speed_data.append(speed_mps)
self.d_steering_data.append(d_steering)
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
def get_speed_dsteering(self):
return self.speed_data, self.d_steering_data
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
folders = sys.argv[1:]
fig, ax = plt.subplots()
colors = ["g", "b", "r", "m", "y"]
markers = ["o", "o", "o", "o"]
for i in range(len(folders)):
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
processor = SpeedDsteeringData()
last_pose_data = None
last_chassis_data = None
topics = ["/apollo/localization/pose", "/apollo/canbus/chassis"]
for data in reader.read(topics):
if "chassis" in data:
last_chassis_data = data["chassis"]
if last_chassis_data is not None:
processor.add(last_chassis_data)
#last_pose_data = None
#last_chassis_data = None
data_x, data_y = processor.get_speed_dsteering()
ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.2)
plt.show()
| apache-2.0 |
PanDAWMS/panda-server | pandaserver/daemons/scripts/copyArchive.py | 1 | 71078 | import os
import re
import sys
import time
import fcntl
import shelve
import datetime
import traceback
import requests
from urllib3.exceptions import InsecureRequestWarning
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer import EventServiceUtils
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.jobdispatcher.Watcher import Watcher
from pandaserver.brokerage.SiteMapper import SiteMapper
# from pandaserver.dataservice.MailUtils import MailUtils
from pandaserver.srvcore.CoreUtils import commands_get_status_output
from pandaserver.config import panda_config
# logger
_logger = PandaLogger().getLogger('copyArchive')
# main
def main(argv=tuple(), tbuf=None, **kwargs):
# password
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(str):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,str))
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (type,value))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),str))
return
_memoryCheck("start")
# # kill old dq2 process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output(
# 'ps axo user,pid,lstart,args | grep dq2.clientapi | grep -v PYTHONPATH | grep -v grep')[-1]
# for line in out.split('\n'):
# if line == '':
# continue
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old dq2 process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill dq2 process : %s %s" % (type,value))
#
#
# # kill old process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output('ps axo user,pid,lstart,args | grep %s' % scriptName)[-1]
# for line in out.split('\n'):
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill process : %s %s" % (type,value))
# instantiate TB
# if tbuf is None:
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# else:
# taskBuffer = tbuf
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# send email for access requests
_logger.debug("Site Access")
try:
# get contact
contactAddr = {}
siteContactAddr = {}
sql = "SELECT name,email FROM ATLAS_PANDAMETA.cloudconfig"
status,res = taskBuffer.querySQLS(sql,{})
for cloudName,cloudEmail in res:
contactAddr[cloudName] = cloudEmail
# get requests
sql = "SELECT pandaSite,status,dn FROM ATLAS_PANDAMETA.siteaccess WHERE status IN (:status1,:status2,:status3) "
sql += "ORDER BY pandaSite,status "
varMap = {}
varMap[':status1'] = 'requested'
varMap[':status2'] = 'tobeapproved'
varMap[':status3'] = 'toberejected'
status,res = taskBuffer.querySQLS(sql,varMap)
requestsInCloud = {}
# mailUtils = MailUtils()
# loop over all requests
for pandaSite,reqStatus,userName in res:
cloud = siteMapper.getSite(pandaSite).cloud
_logger.debug("request : '%s' site=%s status=%s cloud=%s" % (userName,pandaSite,reqStatus,cloud))
# send emails to user
if reqStatus in ['tobeapproved','toberejected']:
# set status
if reqStatus == 'tobeapproved':
newStatus = 'approved'
else:
newStatus = 'rejected'
# get mail address for user
userMailAddr = ''
sqlUM = "SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:userName"
varMap = {}
varMap[':userName'] = userName
stUM,resUM = taskBuffer.querySQLS(sqlUM,varMap)
if resUM is None or len(resUM) == 0:
_logger.error("email address is unavailable for '%s'" % userName)
else:
userMailAddr = resUM[0][0]
# send
# if userMailAddr not in ['',None,'None','notsend']:
# _logger.debug("send update to %s" % userMailAddr)
# retMail = mailUtils.sendSiteAccessUpdate(userMailAddr,newStatus,pandaSite)
# _logger.debug(retMail)
# update database
sqlUp = "UPDATE ATLAS_PANDAMETA.siteaccess SET status=:newStatus "
sqlUp += "WHERE pandaSite=:pandaSite AND dn=:userName"
varMap = {}
varMap[':userName'] = userName
varMap[':newStatus'] = newStatus
varMap[':pandaSite'] = pandaSite
stUp,resUp = taskBuffer.querySQLS(sqlUp,varMap)
else:
# append cloud
requestsInCloud.setdefault(cloud, {})
# append site
requestsInCloud[cloud].setdefault(pandaSite, [])
# append user
requestsInCloud[cloud][pandaSite].append(userName)
# send requests to the cloud responsible
for cloud in requestsInCloud:
requestsMap = requestsInCloud[cloud]
_logger.debug("requests for approval : cloud=%s" % cloud)
# send
if cloud in contactAddr and contactAddr[cloud] not in ['',None,'None']:
# get site contact
for pandaSite in requestsMap:
userNames = requestsMap[pandaSite]
if pandaSite not in siteContactAddr:
varMap = {}
varMap[':siteid'] = pandaSite
sqlSite = "SELECT email FROM ATLAS_PANDAMETA.schedconfig WHERE siteid=:siteid AND rownum<=1"
status,res = taskBuffer.querySQLS(sqlSite,varMap)
siteContactAddr[pandaSite] = res[0][0]
# append
if siteContactAddr[pandaSite] not in ['',None,'None']:
contactAddr[cloud] += ',%s' % siteContactAddr[pandaSite]
else:
_logger.error("contact email address is unavailable for %s" % cloud)
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("Failed with %s %s" % (type,value))
_logger.debug("Site Access : done")
# finalize failed jobs
_logger.debug("AnalFinalizer session")
try:
# get min PandaID for failed jobs in Active table
sql = "SELECT MIN(PandaID),prodUserName,jobDefinitionID,jediTaskID,computingSite FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus "
sql += "GROUP BY prodUserName,jobDefinitionID,jediTaskID,computingSite "
varMap = {}
varMap[':jobStatus'] = 'failed'
varMap[':prodSourceLabel'] = 'user'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is not None:
# loop over all user/jobdefID
for pandaID,prodUserName,jobDefinitionID,jediTaskID,computingSite in res:
# check
_logger.debug("check finalization for %s task=%s jobdefID=%s site=%s" % (prodUserName,jediTaskID,
jobDefinitionID,
computingSite))
sqlC = "SELECT COUNT(*) FROM ("
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += "UNION "
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += ") "
varMap = {}
varMap[':jobStatus1'] = 'failed'
varMap[':jobStatus2'] = 'merging'
varMap[':prodSourceLabel'] = 'user'
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
statC,resC = taskBuffer.querySQLS(sqlC,varMap)
# finalize if there are no non-failed jobs
if resC is not None:
_logger.debug("n of non-failed jobs : %s" % resC[0][0])
if resC[0][0] == 0:
jobSpecs = taskBuffer.peekJobs([pandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
if jobSpec is None:
_logger.debug("skip PandaID={0} not found in jobsActive".format(pandaID))
continue
_logger.debug("finalize %s %s" % (prodUserName,jobDefinitionID))
finalizedFlag = taskBuffer.finalizePendingJobs(prodUserName,jobDefinitionID)
_logger.debug("finalized with %s" % finalizedFlag)
if finalizedFlag and jobSpec.produceUnMerge():
# collect sub datasets
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets")
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList)
else:
_logger.debug("n of non-failed jobs : None")
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("AnalFinalizer failed with %s %s" % (errType,errValue))
# check stuck merging jobs
_logger.debug("check stuck merging jobs")
try:
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get PandaIDs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus'] = 'merging'
varMap[':timeLimit'] = timeLimit
sql = "SELECT distinct jediTaskID FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus and modificationTime<:timeLimit "
tmp,res = taskBuffer.querySQLS(sql,varMap)
checkedDS = set()
for jediTaskID, in res:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':dsType'] = 'trn_log'
sql = "SELECT datasetID FROM ATLAS_PANDA.JEDI_Datasets WHERE jediTaskID=:jediTaskID AND type=:dsType AND nFilesUsed=nFilesTobeUsed "
tmpP,resD = taskBuffer.querySQLS(sql,varMap)
for datasetID, in resD:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':fileStatus'] = 'ready'
varMap[':datasetID'] = datasetID
sql = "SELECT PandaID FROM ATLAS_PANDA.JEDI_Dataset_Contents "
sql += "WHERE jediTaskID=:jediTaskID AND datasetid=:datasetID AND status=:fileStatus AND PandaID=OutPandaID AND rownum<=1 "
tmpP,resP = taskBuffer.querySQLS(sql,varMap)
if resP == []:
continue
PandaID = resP[0][0]
varMap = {}
varMap[':PandaID'] = PandaID
varMap[':fileType'] = 'log'
sql = "SELECT d.status FROM ATLAS_PANDA.filesTable4 f,ATLAS_PANDA.datasets d WHERE PandaID=:PandaID AND f.type=:fileType AND d.name=f.destinationDBlock "
tmpS,resS = taskBuffer.querySQLS(sql,varMap)
if resS is not None:
subStatus, = resS[0]
if subStatus in ['completed']:
jobSpecs = taskBuffer.peekJobs([PandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets for jediTaskID={0} PandaID={1}".format(jediTaskID,PandaID))
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList,updateCompleted=True)
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("check for stuck merging jobs failed with %s %s" % (errType,errValue))
# get sites to skip various timeout
varMap = {}
varMap[':status'] = 'paused'
sql = "SELECT siteid FROM ATLAS_PANDAMETA.schedconfig WHERE status=:status "
sitesToSkipTO = set()
status,res = taskBuffer.querySQLS(sql,varMap)
for siteid, in res:
sitesToSkipTO.add(siteid)
_logger.debug("PQs to skip timeout : {0}".format(','.join(sitesToSkipTO)))
sitesToDisableReassign = set()
# get sites to disable reassign
for siteName in siteMapper.siteSpecList:
siteSpec = siteMapper.siteSpecList[siteName]
if siteSpec.capability == 'ucore' and not siteSpec.is_unified:
continue
if siteSpec.disable_reassign():
sitesToDisableReassign.add(siteName)
_logger.debug("PQs to disable reassign : {0}".format(','.join(sitesToDisableReassign)))
_memoryCheck("watcher")
_logger.debug("Watcher session")
# get the list of workflows
sql = "SELECT DISTINCT workflow FROM ATLAS_PANDAMETA.schedconfig WHERE status='online' "
status, res = taskBuffer.querySQLS(sql, {})
workflow_timeout_map = {}
for workflow, in res + [('production',), ('analysis',)]:
timeout = taskBuffer.getConfigValue('watcher', 'HEARTBEAT_TIMEOUT_{0}'.format(workflow), 'pandaserver', 'atlas')
if timeout is not None:
workflow_timeout_map[workflow] = timeout
elif workflow in ['production', 'analysis']:
workflow_timeout_map[workflow] = 2
workflows = list(workflow_timeout_map)
_logger.debug("timeout : {0}".format(str(workflow_timeout_map)))
# check heartbeat for analysis jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2) "
sql += "AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Anal Watcher : %s" % res)
else:
_logger.debug("# of Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for analysis jobs in transferring
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'transferring'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "
sql += "AND jobStatus=:jobStatus1 AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Transferring Anal Watcher : %s" % res)
else:
_logger.debug("# of Transferring Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Trans Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for sent jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
varMap = {}
varMap[':jobStatus'] = 'sent'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND modificationTime<:modificationTime",
varMap)
if res is None:
_logger.debug("# of Sent Watcher : %s" % res)
else:
_logger.debug("# of Sent Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Sent Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=30,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for 'holding' analysis/ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# get XMLs
xmlIDs = set()
# xmlFiles = os.listdir(panda_config.logdir)
# for file in xmlFiles:
# match = re.search('^(\d+)_([^_]+)_.{36}$',file)
# if match is not None:
# id = match.group(1)
# xmlIDs.append(int(id))
job_output_report_list = taskBuffer.listJobOutputReport()
if job_output_report_list is not None:
for panda_id, job_status, attempt_nr, time_stamp in job_output_report_list:
xmlIDs.add(int(panda_id))
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime)) AND (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND stateChangeTime != modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':prodSourceLabel3'] = 'ddm'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Anal/DDM Watcher : %s" % res)
else:
_logger.debug("# of Holding Anal/DDM Watcher : %s - XMLs : %s" % (len(res),len(xmlIDs)))
for (id,) in res:
_logger.debug("Holding Anal/DDM Watcher %s" % id)
if int(id) in xmlIDs:
_logger.debug(" found XML -> skip %s" % id)
continue
thr = Watcher(taskBuffer,id,single=True,sleepTime=180,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for high prio production jobs
timeOutVal = 3
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND currentPriority>:pLimit "
sql += "AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':pLimit'] = 800
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of High prio Holding Watcher : %s" % res)
else:
_logger.debug("# of High prio Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("High prio Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
timeOutVal = 48
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Watcher : %s" % res)
else:
_logger.debug("# of Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs with internal stage-out
sql = "SELECT PandaID,jobStatus,jobSubStatus FROM ATLAS_PANDA.jobsActive4 j,ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus=:jobStatus1 AND jobSubStatus IS NOT NULL AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':jobStatus1'] = 'transferring'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID, jobStatus, jobSubStatus in res:
_logger.debug("Internal Staging Watcher %s %s:%s" % (pandaID, jobStatus, jobSubStatus))
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
sql = "SELECT PandaID,jobStatus,j.computingSite FROM ATLAS_PANDA.jobsActive4 j, ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID,jobStatus,computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug("skip General Watcher for PandaID={0} at {1} since timeout is disabled for {2}".format(pandaID,computingSite,jobStatus))
continue
_logger.debug("General Watcher %s" % pandaID)
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
_memoryCheck("reassign")
# kill long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
status,res = taskBuffer.querySQLS("SELECT PandaID,cloud,prodSourceLabel FROM ATLAS_PANDA.jobsDefined4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs=[]
dashFileMap = {}
if res is not None:
for pandaID,cloud,prodSourceLabel in res:
# collect PandaIDs
jobs.append(pandaID)
if len(jobs):
_logger.debug("killJobs for Defined (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'activated'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("killJobs for Active (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting ddm jobs for dispatch
_logger.debug("kill PandaMovers")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
sql = "SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND transferType=:transferType AND creationTime<:creationTime"
varMap = {}
varMap[':creationTime'] = timeLimit
varMap[':prodSourceLabel'] = 'ddm'
varMap[':transferType'] = 'dis'
_logger.debug(sql+str(varMap))
status,res = taskBuffer.querySQLS(sql,varMap)
_logger.debug(res)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("kill DDM Jobs (%s)" % str(jobs))
Client.killJobs(jobs,2)
# reassign activated jobs in inactive sites
inactiveTimeLimitSite = 2
inactiveTimeLimitJob = 4
inactivePrioLimit = 800
timeLimitSite = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitSite)
timeLimitJob = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitJob)
# get PandaIDs
sql = 'SELECT distinct computingSite FROM ATLAS_PANDA.jobsActive4 '
sql += 'WHERE prodSourceLabel=:prodSourceLabel '
sql += 'AND ((modificationTime<:timeLimit AND jobStatus=:jobStatus1) '
sql += 'OR (stateChangeTime<:timeLimit AND jobStatus=:jobStatus2)) '
sql += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sql += 'AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stDS,resDS = taskBuffer.querySQLS(sql,varMap)
sqlSS = 'SELECT laststart FROM ATLAS_PANDAMETA.siteData '
sqlSS += 'WHERE site=:site AND flag=:flag AND hours=:hours AND laststart<:laststart '
sqlPI = 'SELECT PandaID,eventService,attemptNr FROM ATLAS_PANDA.jobsActive4 '
sqlPI += 'WHERE prodSourceLabel=:prodSourceLabel AND jobStatus IN (:jobStatus1,:jobStatus2) '
sqlPI += 'AND (modificationTime<:timeLimit OR stateChangeTime<:timeLimit) '
sqlPI += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sqlPI += 'AND computingSite=:site AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
for tmpSite, in resDS:
if tmpSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs at inactive site %s since reassign is disabled' % (tmpSite))
continue
# check if the site is inactive
varMap = {}
varMap[':site'] = tmpSite
varMap[':flag'] = 'production'
varMap[':hours'] = 3
varMap[':laststart'] = timeLimitSite
stSS,resSS = taskBuffer.querySQLS(sqlSS,varMap)
if stSS is not None and len(resSS) > 0:
# get jobs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':site'] = tmpSite
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stPI,resPI = taskBuffer.querySQLS(sqlPI,varMap)
jediJobs = []
# reassign
_logger.debug('reassignJobs for JEDI at inactive site %s laststart=%s' % (tmpSite,resSS[0][0]))
if resPI is not None:
for pandaID, eventService, attemptNr in resPI:
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying es merge %s at inactive site %s' % (pandaID,tmpSite))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI at inactive site %s (%s)' % (tmpSite,jediJobs[iJob:iJob+nJob]))
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign defined jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=4)
# get PandaIDs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,['defined'],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for defined jobs -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for defined jobs (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for JEDI defined jobs -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI defined jobs (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,[],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for long in defined table -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long in defined table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long JEDI in defined table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long JEDI in defined table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T1
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
for tmpCloud in siteMapper.getCloudList():
# ignore special clouds
if tmpCloud in ['CERN','OSG']:
continue
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[siteMapper.getCloud(tmpCloud)['tier1']],[],
True,onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T1 evgensimul in %s -> #%s' % (tmpCloud,len(jobs)))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T1 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T1 JEDI evgensimul in %s -> #%s' % (tmpCloud,len(jediJobs)))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T1 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T2
try:
_logger.debug('looking for stuck T2s to reassign evgensimul')
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
varMap = {}
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'running'
varMap[':prodSourceLabel'] = 'managed'
varMap[':processingType1'] = 'evgen'
varMap[':processingType2'] = 'simul'
sql = "SELECT cloud,computingSite,jobStatus,COUNT(*) FROM ATLAS_PANDA.jobsActive4 "\
"WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND prodSourceLabel=:prodSourceLabel "\
"AND processingType IN (:processingType1,:processingType2) GROUP BY cloud,computingSite,jobStatus "
status,res = taskBuffer.querySQLS(sql, varMap)
if res is not None:
# get ratio of activated/running
siteStatData = {}
for tmpCloud,tmpComputingSite,tmpJobStatus,tmpCount in res:
# skip T1
if tmpComputingSite == siteMapper.getCloud(tmpCloud)['tier1']:
continue
# skip if reassign is disabled
if tmpComputingSite in sitesToDisableReassign:
continue
# add cloud/site
tmpKey = (tmpCloud,tmpComputingSite)
if tmpKey not in siteStatData:
siteStatData[tmpKey] = {'activated':0,'running':0}
# add the number of jobs
if tmpJobStatus in siteStatData[tmpKey]:
siteStatData[tmpKey][tmpJobStatus] += tmpCount
# look for stuck site
stuckThr = 10
stuckSites = []
for tmpKey in siteStatData:
tmpStatData = siteStatData[tmpKey]
if tmpStatData['running'] == 0 or \
float(tmpStatData['activated'])/float(tmpStatData['running']) > stuckThr:
tmpCloud,tmpComputingSite = tmpKey
_logger.debug(' %s:%s %s/%s > %s' % (tmpCloud,tmpComputingSite,tmpStatData['activated'],tmpStatData['running'],stuckThr))
# get stuck jobs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[tmpComputingSite],[tmpCloud],True,
onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T2 evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jobs)))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T2 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T2 JEDI evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jediJobs)))
if len(jediJobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T2 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("failed to reassign T2 evgensimul with %s:%s" % (errType,errValue))
# reassign too long activated jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=2)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],[],[],[],True,
onlyReassignable=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long activated PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying {0} in long activated'.format(pandaID))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long activated in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long activated in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long activated JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long activated JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long starting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['starting'],['managed'],[],[],[],True,
onlyReassignable=True,useStateChangeTime=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long starting PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long starting in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long starting in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long starting JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long starting JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# kill too long-standing analysis jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':prodSourceLabel1'] = 'test'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':prodSourceLabel3'] = 'user'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND modificationTime<:modificationTime ORDER BY PandaID",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for Anal Active (%s)" % str(jobs))
# kill too long pending jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'pending'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Pending (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kick waiting ES merge jobs which were generated from fake co-jumbo
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':esMerge'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID,computingSite FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND eventService=:esMerge ORDER BY jediTaskID "
status,res = taskBuffer.querySQLS(sql, varMap)
jobsMap = {}
if res is not None:
for id,site in res:
if site not in jobsMap:
jobsMap[site] = []
jobsMap[site].append(id)
# kick
if len(jobsMap):
for site in jobsMap:
jobs = jobsMap[site]
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("kick waiting ES merge (%s)" % str(jobs[iJob:iJob+nJob]))
Client.reassignJobs(jobs[iJob:iJob+nJob])
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND (eventService IS NULL OR eventService<>:coJumbo) "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Waiting (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kill too long running ES jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esJob'] = EventServiceUtils.esJobFlagNumber
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService IN (:esJob,:coJumbo) AND currentPriority>=900 "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2, keepUnmerged=True, jobSubStatus='es_toolong')
iJob += nJob
# kill too long running ES merge jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esMergeJob'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService=:esMergeJob "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES merge jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2)
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE ((creationTime<:timeLimit AND (eventService IS NULL OR eventService<>:coJumbo)) "
sql += "OR modificationTime<:timeLimit) "
varMap = {}
varMap[':timeLimit'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,4)
_logger.debug("killJobs in jobsWaiting (%s)" % str(jobs))
# rebrokerage
_logger.debug("Rebrokerage start")
# get timeout value
timeoutVal = taskBuffer.getConfigValue('rebroker','ANALY_TIMEOUT')
if timeoutVal is None:
timeoutVal = 12
_logger.debug("timeout value : {0}h".format(timeoutVal))
try:
normalTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeoutVal)
sortTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
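# Two windows are used below: sortTimeLimit (3 h) pre-selects candidate
# user/jobDefinitionID combinations, while normalTimeLimit (ANALY_TIMEOUT,
# default 12 h) is the threshold actually enforced per combination unless the
# site is offline/test, in which case rebrokerage is triggered immediately.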
sql = "WITH p AS ("\
"SELECT MIN(PandaID) PandaID,jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
"FROM ATLAS_PANDA.jobsActive4 "\
"WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "\
"AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3) "\
"AND jobsetID IS NOT NULL AND lockedBy=:lockedBy "\
"GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
") "\
"SELECT /*+ INDEX (s JOBS_STATUSLOG_PANDAID_IDX) */ "\
"p.jobDefinitionID,p.prodUserName,p.prodUserID,p.computingSite,s.modificationTime,p.jediTaskID,p.processingType " \
"FROM p, ATLAS_PANDA.jobs_statuslog s "\
"WHERE s.PandaID=p.PandaID AND s.jobStatus=:s_jobStatus AND s.modificationTime<:modificationTime "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'dummy'
varMap[':jobStatus3'] = 'starting'
varMap[':s_jobStatus'] = 'activated'
# get jobs older than threshold
ret,res = taskBuffer.querySQLS(sql, varMap)
resList = []
keyList = set()
if res is not None:
for tmpItem in res:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
keyList.add(tmpKey)
resList.append(tmpItem)
# get stalled assigned job
sqlA = "SELECT jobDefinitionID,prodUserName,prodUserID,computingSite,MAX(creationTime),jediTaskID,processingType "
sqlA += "FROM ATLAS_PANDA.jobsDefined4 "
sqlA += "WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) AND jobStatus IN (:jobStatus1,:jobStatus2) "
sqlA += "AND creationTime<:modificationTime AND lockedBy=:lockedBy "
sqlA += "GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'assigned'
varMap[':jobStatus2'] = 'defined'
retA,resA = taskBuffer.querySQLS(sqlA, varMap)
if resA is not None:
for tmpItem in resA:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
if tmpKey not in keyList:
keyList.add(tmpKey)
resList.append(tmpItem)
# sql to check recent activity
sql = "SELECT PandaID,stateChangeTime,jobStatus FROM %s "
sql += "WHERE prodUserName=:prodUserName AND jobDefinitionID=:jobDefinitionID "
sql += "AND computingSite=:computingSite AND jediTaskID=:jediTaskID "
sql += "AND jobStatus NOT IN (:jobStatus1,:jobStatus2,:jobStatus3) "
sql += "AND stateChangeTime>:modificationTime "
sql += "AND rownum <= 1"
# sql to get associated jobs with jediTaskID
sqlJJ = "SELECT PandaID FROM %s "
sqlJJ += "WHERE jediTaskID=:jediTaskID AND jobStatus IN (:jobS1,:jobS2,:jobS3,:jobS4,:jobS5) "
sqlJJ += "AND jobDefinitionID=:jobDefID AND computingSite=:computingSite "
if resList != []:
recentRuntimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# loop over all user/jobID combinations
iComb = 0
nComb = len(resList)
_logger.debug("total combinations = %s" % nComb)
for jobDefinitionID,prodUserName,prodUserID,computingSite,maxModificationTime,jediTaskID,processingType in resList:
# check if jobs with the jobID have run recently
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
varMap[':modificationTime'] = recentRuntimeLimit
varMap[':jobStatus1'] = 'closed'
varMap[':jobStatus2'] = 'failed'
varMap[':jobStatus3'] = 'starting'
_logger.debug(" rebro:%s/%s:ID=%s:%s jediTaskID=%s site=%s" % (iComb,nComb,jobDefinitionID,
prodUserName,jediTaskID,
computingSite))
iComb += 1
hasRecentJobs = False
# check site
if not siteMapper.checkSite(computingSite):
_logger.debug(" -> skip unknown site=%s" % computingSite)
continue
# check site status
tmpSiteStatus = siteMapper.getSite(computingSite).status
if tmpSiteStatus not in ['offline','test']:
# use normal time limit for normal site status
if maxModificationTime > normalTimeLimit:
_logger.debug(" -> skip wait for normal timelimit=%s<maxModTime=%s" % (normalTimeLimit,maxModificationTime))
continue
for tableName in ['ATLAS_PANDA.jobsActive4','ATLAS_PANDA.jobsArchived4']:
retU,resU = taskBuffer.querySQLS(sql % tableName, varMap)
if resU is None:
# database error
raise RuntimeError("failed to check modTime")
if resU != []:
# found recent jobs
hasRecentJobs = True
_logger.debug(" -> skip due to recent activity %s to %s at %s" % (resU[0][0],
resU[0][2],
resU[0][1]))
break
else:
_logger.debug(" -> immediate rebro due to site status=%s" % tmpSiteStatus)
if hasRecentJobs:
# skip since some jobs have run recently
continue
else:
if jediTaskID is None:
_logger.debug(" -> rebro for normal task : no action")
else:
_logger.debug(" -> rebro for JEDI task")
killJobs = []
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':jobDefID'] = jobDefinitionID
varMap[':computingSite'] = computingSite
varMap[':jobS1'] = 'defined'
varMap[':jobS2'] = 'assigned'
varMap[':jobS3'] = 'activated'
varMap[':jobS4'] = 'dummy'
varMap[':jobS5'] = 'starting'
for tableName in ['ATLAS_PANDA.jobsDefined4','ATLAS_PANDA.jobsActive4']:
retJJ,resJJ = taskBuffer.querySQLS(sqlJJ % tableName, varMap)
for tmpPandaID, in resJJ:
killJobs.append(tmpPandaID)
# reverse sort to kill buildJob in the end
killJobs.sort()
killJobs.reverse()
# kill to reassign
taskBuffer.killJobs(killJobs,'JEDI','51',True)
except Exception as e:
_logger.error("rebrokerage failed with {0} : {1}".format(str(e), traceback.format_exc()))
# kill too long running jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=21)
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
# set tobekill
_logger.debug('killJobs for Running (%s)' % jobs[iJob:iJob+nJob])
Client.killJobs(jobs[iJob:iJob+nJob],2)
# run watcher
for id in jobs[iJob:iJob+nJob]:
thr = Watcher(taskBuffer,id,single=True,sitemapper=siteMapper,sleepTime=60*24*21)
thr.start()
thr.join()
time.sleep(1)
iJob += nJob
time.sleep(10)
# kill too long waiting ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=5)
varMap = {}
varMap[':prodSourceLabel'] = 'ddm'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for DDM (%s)" % str(jobs))
# kill too long throttled jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'throttled'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime ",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for throttled (%s)" % str(jobs))
# check if merge job is valid
_logger.debug('kill invalid pmerge')
varMap = {}
varMap[':processingType'] = 'pmerge'
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
sql = "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsDefined4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
sql += "UNION "
sql += "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsActive4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
status,res = taskBuffer.querySQLS(sql,varMap)
nPmerge = 0
badPmerge = 0
_logger.debug('check {0} pmerge'.format(len(res)))
for pandaID,jediTaskID in res:
nPmerge += 1
isValid,tmpMsg = taskBuffer.isValidMergeJob(pandaID,jediTaskID)
if isValid is False:
_logger.debug("kill pmerge {0} since {1} gone".format(pandaID,tmpMsg))
taskBuffer.killJobs([pandaID],'killed since pre-merge job {0} gone'.format(tmpMsg),
'52',True)
badPmerge += 1
_logger.debug('killed invalid pmerge {0}/{1}'.format(badPmerge,nPmerge))
# cleanup of jumbo jobs
_logger.debug('jumbo job cleanup')
res = taskBuffer.cleanupJumboJobs()
_logger.debug(res)
_memoryCheck("delete XML")
# delete old files in DA cache
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
files = os.listdir(panda_config.cache_dir)
for file in files:
# skip special test file
if file == 'sources.72c48dc5-f055-43e5-a86e-4ae9f8ea3497.tar.gz':
continue
if file == 'sources.090f3f51-fc81-4e80-9749-a5e4b2bd58de.tar.gz':
continue
try:
# get timestamp
timestamp = datetime.datetime.fromtimestamp(os.stat('%s/%s' % (panda_config.cache_dir,file)).st_mtime)
# delete
if timestamp < timeLimit:
_logger.debug("delete %s " % file)
os.remove('%s/%s' % (panda_config.cache_dir,file))
except Exception:
pass
_memoryCheck("delete core")
# delete core
dirName = '%s/..' % panda_config.logdir
for file in os.listdir(dirName):
if file.startswith('core.'):
_logger.debug("delete %s " % file)
try:
os.remove('%s/%s' % (dirName,file))
except Exception:
pass
# update email DB
_memoryCheck("email")
_logger.debug("Update emails")
# lock file
_lockGetMail = open(panda_config.lockfile_getMail, 'w')
# lock email DB
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_EX)
# open email DB
pDB = shelve.open(panda_config.emailDB)
# read
mailMap = {}
for name in pDB:
addr = pDB[name]
mailMap[name] = addr
# close DB
pDB.close()
# release file lock
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_UN)
# set email address
for name in mailMap:
addr = mailMap[name]
# remove _
name = re.sub('_$','',name)
status,res = taskBuffer.querySQLS("SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:name",{':name':name})
# failed or not found
if status == -1 or len(res) == 0:
_logger.error("%s not found in user DB" % name)
continue
# already set
if res[0][0] not in ['','None',None]:
continue
# update email
_logger.debug("set '%s' to %s" % (name,addr))
status,res = taskBuffer.querySQLS("UPDATE ATLAS_PANDAMETA.users SET email=:addr WHERE name=:name",{':addr':addr,':name':name})
# sandbox
_logger.debug("Touch sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
sqlC = "SELECT hostName,fileName,creationTime,userName FROM ATLAS_PANDAMETA.userCacheUsage "\
"WHERE creationTime>:timeLimit AND creationTime>modificationTime "\
"AND (fileName like 'sources%' OR fileName like 'jobO%') "
sqlU = "UPDATE ATLAS_PANDAMETA.userCacheUsage SET modificationTime=CURRENT_DATE "\
"WHERE userName=:userName AND fileName=:fileName "
status, res = taskBuffer.querySQLS(sqlC, {':timeLimit': timeLimit})
if res is None:
_logger.error("failed to get files")
elif len(res) > 0:
_logger.debug("{0} files to touch".format(len(res)))
for hostName, fileName, creationTime, userName in res:
base_url = 'https://{0}:{1}'.format(hostName, panda_config.pserverport)
_logger.debug("touch {0} on {1} created at {2}".format(fileName, hostName, creationTime))
s,o = Client.touchFile(base_url, fileName)
_logger.debug(o)
if o == 'True':
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
taskBuffer.querySQLS(sqlU, varMap)
_logger.debug("Check sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
expireLimit = datetime.datetime.utcnow() - datetime.timedelta(days=30)
sqlD = "DELETE FROM ATLAS_PANDAMETA.userCacheUsage WHERE userName=:userName AND fileName=:fileName "
nRange = 100
for i in range(nRange):
_logger.debug("{0}/{1} {2} files to check".format(nRange, i, len(res)))
res = taskBuffer.getLockSandboxFiles(timeLimit, 1000)
if res is None:
_logger.error("failed to get files")
break
elif len(res) == 0:
break
for userName, hostName, fileName, creationTime, modificationTime in res:
url = 'https://{0}:{1}/cache/{2}'.format(hostName, panda_config.pserverport, fileName)
_logger.debug("checking {0} created at {1}".format(url, creationTime))
toDelete = False
try:
x = requests.head(url, verify=False)
_logger.debug("code {0}".format(x.status_code))
if x.status_code == 404:
_logger.debug("delete")
toDelete = True
except Exception as e:
_logger.debug("failed with {0}".format(str(e)))
if creationTime < expireLimit:
toDelete = True
_logger.debug("delete due to creationTime={0}".format(creationTime))
# update or delete
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
if toDelete:
taskBuffer.querySQLS(sqlD, varMap)
else:
_logger.debug("keep")
_memoryCheck("end")
_logger.debug("===================== end =====================")
# run
if __name__ == '__main__':
main(argv=sys.argv)
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tests/test_msgpack/test_except.py | 15 | 1043 | #!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(unittest.TestCase):
def test_raise_on_find_unsupported_value(self):
import datetime
self.assertRaises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalidvalue(self):
self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
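# The payload above appears to start with 0xd9 (msgpack str-8) announcing a
# 0x97 = 151-byte string, but only a few bytes follow, so unpacking cannot
# complete and ValueError is raised.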
| gpl-3.0 |
rmcgibbo/scipy | scipy/stats/_discrete_distns.py | 15 | 20781 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
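# A quick numeric sanity check of the pmf formula above (a sketch, assuming a
# standard SciPy/NumPy install; not part of this module):
#
#     from scipy.stats import binom
#     from scipy.special import comb
#     # choose(10, 3) * 0.5**3 * 0.5**7 = 120 / 1024 = 0.1171875
#     assert abs(binom.pmf(3, n=10, p=0.5) - comb(10, 3) * 0.5**10) < 1e-12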
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
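# Illustrative check of the geometric pmf/cdf relations used above (a sketch,
# assuming SciPy is installed; the values are hypothetical):
#
#     from scipy.stats import geom
#     # pmf(2; p=0.25) = (1-0.25)**1 * 0.25 = 0.1875
#     # cdf(2; p=0.25) = 1 - (1-0.25)**2  = 0.4375
#     assert abs(geom.pmf(2, 0.25) - 0.1875) < 1e-12
#     assert abs(geom.cdf(2, 0.25) - 0.4375) < 1e-12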
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
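# Worked example of the Poisson pmf above (a sketch, assuming SciPy is
# installed; the numbers are for illustration only):
#
#     from math import exp
#     from scipy.stats import poisson
#     # pmf(2; mu=3) = exp(-3) * 3**2 / 2! = 4.5 * exp(-3) ~= 0.2240
#     assert abs(poisson.pmf(2, 3) - 4.5 * exp(-3)) < 1e-12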
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
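# Note: for lambda_ > 0 the pmf above is a geometric law on {0, 1, 2, ...} with
# success probability p = 1 - exp(-lambda_), so (sketch, assuming SciPy is
# installed) planck.pmf(k, lam) should agree with geom.pmf(k + 1, 1 - exp(-lam)),
# the shift by one accounting for `geom` starting its support at 1:
#
#     from math import exp
#     from scipy.stats import planck, geom
#     lam = 0.7
#     assert abs(planck.pmf(3, lam) - geom.pmf(4, 1 - exp(-lam))) < 1e-12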
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
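# Quick check of the Zipf pmf above (a sketch, assuming SciPy is installed):
#
#     from scipy.stats import zipf
#     from scipy.special import zeta
#     # pmf(k=1, a=2) = 1 / (zeta(2) * 1**2) = 6 / pi**2 ~= 0.6079
#     assert abs(zipf.pmf(1, 2) - 1.0 / zeta(2, 1)) < 1e-12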
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
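# Illustrative check that `skellam` behaves like the difference of two
# independent Poisson variables, as described in its docstring (a sketch,
# assuming SciPy/NumPy are installed; the parameter values are arbitrary):
#
#     import numpy as np
#     from scipy.stats import skellam
#     mu1, mu2 = 5.0, 3.0
#     # mean = mu1 - mu2 = 2, variance = mu1 + mu2 = 8
#     m, v = skellam.stats(mu1, mu2, moments='mv')
#     assert np.allclose([m, v], [2.0, 8.0])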
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
linebp/pandas | pandas/tests/reshape/test_reshape.py | 1 | 43476 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas.core.reshape.reshape import (
melt, lreshape, get_dummies, wide_to_long)
import pandas.util.testing as tm
from pandas.compat import range, u
class TestMelt(object):
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
self.var_name = 'var'
self.value_name = 'val'
self.df1 = pd.DataFrame([[1.067683, -1.110463, 0.20867
], [-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361]])
self.df1.columns = [list('ABC'), list('abc')]
self.df1.columns.names = ['CAP', 'low']
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ['variable', 'value']
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(),
melt(self.df))
tm.assert_frame_equal(self.df.melt(id_vars=['id1', 'id2'],
value_vars=['A', 'B']),
melt(self.df,
id_vars=['id1', 'id2'],
value_vars=['A', 'B']))
tm.assert_frame_equal(self.df.melt(var_name=self.var_name,
value_name=self.value_name),
melt(self.df,
var_name=self.var_name,
value_name=self.value_name))
tm.assert_frame_equal(self.df1.melt(col_level=0),
melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ['variable', 'value']
result1 = self.df.melt(id_vars=['id1'])
assert result1.columns.tolist() == ['id1', 'variable', 'value']
result2 = self.df.melt(id_vars=['id1', 'id2'])
assert result2.columns.tolist() == ['id1', 'id2', 'variable', 'value']
def test_value_vars(self):
result3 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A')
assert len(result3) == 10
result4 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=['id1', 'id2'],
value_vars=type_(('A', 'B')))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame({
('A', 'a'): self.df1[('A', 'a')],
'CAP': ['B'] * len(self.df1),
'low': ['b'] * len(self.df1),
'value': self.df1[('B', 'b')],
}, columns=[('A', 'a'), 'CAP', 'low', 'value'])
result = self.df1.melt(id_vars=[('A', 'a')], value_vars=[('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ('A', 'a')
list_a = [tuple_a]
tuple_b = ('B', 'b')
list_b = [tuple_b]
for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b),
(tuple_a, tuple_b)):
with tm.assert_raises_regex(ValueError, r'MultiIndex'):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ['var', 'value']
result6 = self.df.melt(id_vars=['id1'], var_name=self.var_name)
assert result6.columns.tolist() == ['id1', 'var', 'value']
result7 = self.df.melt(id_vars=['id1', 'id2'], var_name=self.var_name)
assert result7.columns.tolist() == ['id1', 'id2', 'var', 'value']
result8 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name)
assert result8.columns.tolist() == ['id1', 'id2', 'var', 'value']
result9 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ['variable', 'val']
result11 = self.df.melt(id_vars=['id1'], value_name=self.value_name)
assert result11.columns.tolist() == ['id1', 'variable', 'val']
result12 = self.df.melt(id_vars=['id1', 'id2'],
value_name=self.value_name)
assert result12.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result13 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
value_name=self.value_name)
assert result13.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result14 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable',
self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name,
value_name=self.value_name)
assert result15.columns.tolist() == ['var', 'val']
result16 = self.df.melt(id_vars=['id1'], var_name=self.var_name,
value_name=self.value_name)
assert result16.columns.tolist() == ['id1', 'var', 'val']
result17 = self.df.melt(id_vars=['id1', 'id2'],
var_name=self.var_name,
value_name=self.value_name)
assert result17.columns.tolist() == ['id1', 'id2', 'var', 'val']
result18 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name,
value_name=self.value_name)
assert result18.columns.tolist() == ['id1', 'id2', 'var', 'val']
result19 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name,
value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name,
self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = df20.melt()
assert result20.columns.tolist() == ['foo', 'value']
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level='CAP')
assert res1.columns.tolist() == ['CAP', 'value']
assert res2.columns.tolist() == ['CAP', 'value']
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ['CAP', 'low', 'value']
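# For reference, a minimal sketch of what ``melt`` does to a small frame
# (assumes only pandas; the column values are hypothetical):
#
#     df = pd.DataFrame({'id': [1, 2], 'A': [3, 4], 'B': [5, 6]})
#     long = df.melt(id_vars=['id'])
#     # -> columns ['id', 'variable', 'value'] with one row per (id, column)
#     #    pair, i.e. 4 rows here: (1, A, 3), (2, A, 4), (1, B, 5), (2, B, 6)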
class TestGetDummies(object):
sparse = False
def setup_method(self, method):
self.df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
def test_basic(self):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': {0: 1,
1: 0,
2: 0},
'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected)
assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected)
expected.index = list('ABC')
assert_frame_equal(
get_dummies(s_series_index, sparse=self.sparse), expected)
def test_basic_types(self):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype='uint8',
columns=list('abc'))
if not self.sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_series, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
tm.assert_series_equal(result.get_dtype_counts(),
Series({'uint8': 8}))
result = get_dummies(s_df, sparse=self.sparse, columns=['a'])
expected = Series({'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
def test_just_na(self):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=self.sparse)
res_series = get_dummies(just_na_series, sparse=self.sparse)
res_series_index = get_dummies(just_na_series_index,
sparse=self.sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=self.sparse)
exp = DataFrame({'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=self.sparse)
exp_na = DataFrame({nan: {0: 0, 1: 0, 2: 1},
'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}},
dtype=np.uint8)
exp_na = exp_na.reindex_axis(['a', 'b', nan], 1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=np.uint8)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
    def test_unicode(self):  # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=self.sparse)
exp = DataFrame({'letter_e': {0: 1,
1: 0,
2: 0},
u('letter_%s') % eacute: {0: 0,
1: 1,
2: 1}},
dtype=np.uint8)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self):
df = self.df
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self):
prefixes = ['from_A', 'from_B']
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'from_A_a', 'from_A_b', 'from_B_b',
'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self):
# not that you should do this...
df = self.df
result = get_dummies(df, prefix='bad', sparse=self.sparse)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'],
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self):
df = self.df
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self):
df = self.df
result = get_dummies(df, prefix_sep='..', sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]})
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..',
'B': '__'}, sparse=self.sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix=['too few'], sparse=self.sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse)
def test_dataframe_dummies_prefix_dict(self):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': [1, 0, 1, 0],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_b': [1, 1, 0, 0],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'A_nan',
'B_b', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1],
'cat_x': [1, 0, 0],
'cat_y': [0, 1, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c',
'cat_x', 'cat_y']]
assert_frame_equal(result, expected)
def test_basic_drop_first(self):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
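    # In other words (a sketch, assuming only pandas): ``drop_first=True``
    # removes the dummy column for the first category level to avoid perfect
    # collinearity, e.g.
    #
    #     pd.get_dummies(list('abc'), drop_first=True)
    #     # -> columns ['b', 'c'] only; 'a' is encoded implicitly as (0, 0)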
def test_basic_drop_first_one_level(self):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self):
        # Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, sparse=self.sparse, drop_first=True)
exp = DataFrame({'b': {0: 0,
1: 1,
2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_na = DataFrame({'b': {0: 0,
1: 1,
2: 0},
nan: {0: 0,
1: 0,
2: 1}}, dtype=np.uint8).reindex_axis(
['b', nan], 1)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse,
drop_first=True)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse,
drop_first=True)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(np.uint8)
result = pd.get_dummies(data, columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat)
data = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.uint8)
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols)
tm.assert_frame_equal(result, expected)
class TestGetDummiesSparse(TestGetDummies):
sparse = True
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
class TestLreshape(object):
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009'],
'visitdt2':
['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt1': [1823, 3338, 1549, 3298, 4306],
'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
df = DataFrame(data)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
'wt': ['wt%d' % i for i in range(1, 4)]}
result = lreshape(df, spec)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 1454, 3139,
4133, 1766, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 103, 104, 105, 101,
104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Male',
'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009',
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009', '08jan2009', '20dec2008',
'30dec2008', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 3301, 1454,
3139, 4133, 1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 102, 103, 104, 105,
101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009', nan,
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', nan, nan, '02jan2009',
'15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0, nan,
1892.0, 3338.0, 4575.0, 2293.0, nan, nan, 3377.0,
4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
'wt': ['wt%d' % i for i in range(1, 4)]}
pytest.raises(ValueError, lreshape, df, spec)
class TestWideToLong(object):
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A1970": {0: "a",
1: "b",
2: "c"},
"A1980": {0: "d",
1: "e",
2: "f"},
"B1970": {0: 2.5,
1: 1.2,
2: .7},
"B1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
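    # The naming convention exercised above (a sketch): each stub name
    # ("A", "B") is combined with a suffix ("1970", "1980") to form the wide
    # columns, and ``wide_to_long(df, ["A", "B"], i="id", j="year")`` splits
    # the suffixes back out into the ``year`` index level, e.g.
    #
    #     A1970, A1980, B1970, B1980, X, id -> index (id, year), columns X, A, B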
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
stubs = ['inc', 'edu']
# TODO: unused?
df_long = pd.wide_to_long(df, stubs, i='id', j='age') # noqa
assert stubs == ['inc', 'edu']
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A.1970": {0: "a",
1: "b",
2: "c"},
"A.1980": {0: "d",
1: "e",
2: "f"},
"B.1970": {0: 2.5,
1: 1.2,
2: .7},
"B.1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(long_frame, exp_frame)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A(quarterly)1970": {0: "a",
1: "b",
2: "c"},
"A(quarterly)1980": {0: "d",
1: "e",
2: "f"},
"B(quarterly)1970": {0: 2.5,
1: 1.2,
2: .7},
"B(quarterly)1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(
['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
long_frame = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_unbalanced(self):
        # test that we can have a varying number of time variables
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': ['X1', 'X1', 'X2', 'X2'],
'A': [1.0, 3.0, 2.0, 4.0],
'B': [5.0, np.nan, 6.0, np.nan],
'id': [0, 0, 1, 1],
'year': ['2010', '2011', '2010', '2011']}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame, exp_frame)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'BBBX': [91, 92, 93],
'BBBZ': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'BBBX': [91, 92, 93, 91, 92, 93],
'BBBZ': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['BBBX', 'BBBZ', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_separator(self):
        # if an invalid separator is supplied, an empty data frame is returned
sep = 'nope!'
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'A2010': [],
'A2011': [],
'B2010': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'Arating': [91, 92, 93],
'Arating_old': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'Arating': [91, 92, 93, 91, 92, 93],
'Arating_old': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['Arating', 'Arating_old', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_suffixtype(self):
        # If all stub names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = pd.DataFrame({'Aone': [1.0, 2.0],
'Atwo': [3.0, 4.0],
'Bone': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'Aone': [],
'Atwo': [],
'Bone': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'Aone', 'Atwo', 'Bone', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = pd.DataFrame({
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
})
exp_frame = pd.DataFrame({
'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
'age': ['1', '2', '1', '2', '1', '2', '1', '2', '1',
'2', '1', '2', '1', '2', '1', '2', '1', '2']
})
exp_frame = exp_frame.set_index(['famid', 'birth', 'age'])[['ht']]
long_frame = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
tm.assert_frame_equal(long_frame, exp_frame)
def test_non_unique_idvars(self):
# GH16382
        # Raise an error if non-unique id vars (i) are passed
df = pd.DataFrame({
'A_A1': [1, 2, 3, 4, 5],
'B_B1': [1, 2, 3, 4, 5],
'x': [1, 1, 1, 1, 1]
})
with pytest.raises(ValueError):
wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
| bsd-3-clause |
xclxxl414/rqalpha | rqalpha/mod/rqalpha_mod_alphaStar_utils/mod.py | 1 | 2371 | #coding=utf-8
"""
@author: evilXu
@file: mod.py
@time: 2018/2/28 16:59
@description:
"""
from rqalpha.interface import AbstractMod
from rqalpha.utils.logger import system_log,user_system_log
import pandas as pd
from rqalpha.api import *
class UtilsMod(AbstractMod):
def __init__(self):
self._inject_api()
def start_up(self, env, mod_config):
system_log.debug("UtilsMod.start_up,config:{0}",mod_config)
def tear_down(self, code, exception=None):
pass
# print(">>> AlphaHDataMode.tear_down")
def _inject_api(self):
from rqalpha import export_as_api
from rqalpha.execution_context import ExecutionContext
from rqalpha.const import EXECUTION_PHASE
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
EXECUTION_PHASE.BEFORE_TRADING,
EXECUTION_PHASE.ON_BAR,
EXECUTION_PHASE.AFTER_TRADING,
EXECUTION_PHASE.SCHEDULED)
def equalWeight_order(tobe_holding_codes=[], context=None):
user_system_log.info("equalWeight_order:{}",str(tobe_holding_codes))
if len(tobe_holding_codes) < 1:
for code, pos in context.portfolio.positions.items():
if pos.sellable > 0:
order_shares(code, -1 * pos.sellable)
return
# print("positions",context.portfolio.positions)
_target_percent = round(1.0 / len(tobe_holding_codes), 2)
_targets = set(tobe_holding_codes)
_tobe_sell = [pos for code, pos in context.portfolio.positions.items() if code not in _targets]
for pos in _tobe_sell:
if pos.sellable > 0:
order_shares(pos.order_book_id, -1 * pos.sellable)
for code in tobe_holding_codes:
_acount = context.portfolio.stock_account
_cash_percent = round(_acount.cash / _acount.total_value, 2)
_real_percent = min(_cash_percent, _target_percent)
# print(_acount.cash,_acount.total_value,_cash_percent,_real_percent)
if _real_percent > 0:
order_target_percent(code, _real_percent)
return | apache-2.0 |
Arn-O/kadenze-deep-creative-apps | session-5/libs/inception.py | 13 | 4890 | """
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import os
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
from .utils import download_and_extract_tar, download_and_extract_zip
def inception_download(data_dir='inception', version='v5'):
"""Download a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
"""
if version == 'v3':
download_and_extract_tar(
'https://s3.amazonaws.com/cadl/models/inception-2015-12-05.tgz',
data_dir)
return (os.path.join(data_dir, 'classify_image_graph_def.pb'),
os.path.join(data_dir, 'imagenet_synset_to_human_label_map.txt'))
else:
download_and_extract_zip(
'https://s3.amazonaws.com/cadl/models/inception5h.zip', data_dir)
return (os.path.join(data_dir, 'tensorflow_inception_graph.pb'),
os.path.join(data_dir, 'imagenet_comp_graph_label_strings.txt'))
def get_inception_model(data_dir='inception', version='v5'):
"""Get a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
Returns
-------
net : dict
{'graph_def': graph_def, 'labels': synsets}
where the graph_def is a tf.GraphDef and the synsets
map an integer label from 0-1000 to a list of names
"""
# Download the trained net
model, labels = inception_download(data_dir, version)
# Parse the ids and synsets
txt = open(labels).readlines()
synsets = [(key, val.strip()) for key, val in enumerate(txt)]
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python' +
                  ' to environment. e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
return {
'graph_def': graph_def,
'labels': synsets,
'preprocess': preprocess,
'deprocess': deprocess
}
def preprocess(img, crop=True, resize=True, dsize=(299, 299)):
if img.dtype != np.uint8:
img *= 255.0
if crop:
crop = np.min(img.shape[:2])
r = (img.shape[0] - crop) // 2
c = (img.shape[1] - crop) // 2
cropped = img[r: r + crop, c: c + crop]
else:
cropped = img
if resize:
rsz = imresize(cropped, dsize, preserve_range=True)
else:
rsz = cropped
if rsz.ndim == 2:
rsz = rsz[..., np.newaxis]
rsz = rsz.astype(np.float32)
# subtract imagenet mean
return (rsz - 117)
def deprocess(img):
return np.clip(img + 117, 0, 255).astype(np.uint8)
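# Rough round-trip sketch of the two helpers above (assumes skimage's sample
# image and this module's defaults; not part of the original file):
#
#     from skimage import data
#     img = data.coffee()   # HxWx3 uint8 test image
#     x = preprocess(img)   # center-cropped, resized to 299x299, shifted by -117
#     rec = deprocess(x)    # adds the mean back and clips to [0, 255] uint8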
def test_inception():
"""Loads the inception network and applies it to a test image.
"""
with tf.Session() as sess:
net = get_inception_model()
tf.import_graph_def(net['graph_def'], name='inception')
g = tf.get_default_graph()
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
softmax = g.get_tensor_by_name(names[-3] + ':0')
from skimage import data
img = preprocess(data.coffee())[np.newaxis]
res = np.squeeze(softmax.eval(feed_dict={x: img}))
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
"""Let's visualize the network's gradient activation
when backpropagated to the original input image. This
is effectively telling us which pixels contribute to the
predicted class or given neuron"""
pools = [name for name in names if 'pool' in name.split('/')[-1]]
fig, axs = plt.subplots(1, len(pools))
for pool_i, poolname in enumerate(pools):
pool = g.get_tensor_by_name(poolname + ':0')
pool.get_shape()
neuron = tf.reduce_max(pool, 1)
saliency = tf.gradients(neuron, x)
neuron_idx = tf.arg_max(pool, 1)
this_res = sess.run([saliency[0], neuron_idx],
feed_dict={x: img})
grad = this_res[0][0] / np.max(np.abs(this_res[0]))
axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
axs[pool_i].set_title(poolname)
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/deap-master/examples/coev/coop_evol.py | 1 | 6195 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the evolving test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.4. The number of species is evolved by adding and
removing species as stagnation occurs.
"""
import random
try:
import matplotlib.pyplot as plt
plt.figure()
except:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
NUM_SPECIES = 1
TARGET_SIZE = 30
IMPROVMENT_TRESHOLD = 0.5
IMPROVMENT_LENGTH = 5
EXTINCTION_TRESHOLD = 5.0
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
toolbox.register("evaluateContribution", coop_base.matchSetContribution)
def main(extended=True, verbose=True):
target_set = []
species = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
g = 0
for i in range(len(schematas)):
size = int(TARGET_SIZE / len(schematas))
target_set.extend(toolbox.target_set(schematas[i], size))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
species_index = list(range(NUM_SPECIES))
last_index_added = species_index[-1]
# Init with random a representative for each species
representatives = [random.choice(species[i]) for i in range(NUM_SPECIES)]
best_fitness_history = [None] * IMPROVMENT_LENGTH
if plt and extended:
contribs = [[]]
stag_gen = []
collab = []
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for (i, s), j in zip(enumerate(species), species_index):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
# Get the representatives excluding the current species
r = representatives[:i] + representatives[i + 1:]
for ind in s:
# Evaluate and set the individual fitness
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=j, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
if plt and extended:
# Book keeping of the collaborative fitness
collab.append(next_repr[i].fitness.values[0])
g += 1
representatives = next_repr
# Keep representatives fitness for stagnation detection
best_fitness_history.pop(0)
best_fitness_history.append(representatives[0].fitness.values[0])
try:
diff = best_fitness_history[-1] - best_fitness_history[0]
except TypeError:
diff = float("inf")
if plt and extended:
for (i, rep), j in zip(enumerate(representatives), species_index):
contribs[j].append((toolbox.evaluateContribution(representatives,
target_set, i)[0], g - 1))
if diff < IMPROVMENT_TRESHOLD:
if len(species) > 1:
contributions = []
for i in range(len(species)):
contributions.append(toolbox.evaluateContribution(representatives, target_set, i)[0])
for i in reversed(range(len(species))):
if contributions[i] < EXTINCTION_TRESHOLD:
species.pop(i)
species_index.pop(i)
representatives.pop(i)
last_index_added += 1
best_fitness_history = [None] * IMPROVMENT_LENGTH
species.append(toolbox.species())
species_index.append(last_index_added)
representatives.append(random.choice(species[-1]))
if extended and plt:
stag_gen.append(g - 1)
contribs.append([])
if extended:
for r in representatives:
# print final representatives without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
    if extended and plt: # Plotting of the evolution
line1, = plt.plot(collab, "--", color="k")
for con in contribs:
try:
con, g = zip(*con)
line2, = plt.plot(g, con, "-", color="k")
except ValueError:
pass
axis = plt.axis("tight")
for s in stag_gen:
plt.plot([s, s], [0, axis[-1]], "--", color="k")
plt.legend((line1, line2), ("Collaboration", "Contribution"), loc="center right")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.show()
if __name__ == "__main__":
main()
| mit |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/svm_model_test.py | 10 | 3519 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""svm model test for scoring"""
import unittest
import os
from sparktkregtests.lib import scoring_utils
from sparktkregtests.lib import sparktk_test
class SvmScoreTest(sparktk_test.SparkTKTestCase):
def lattice2frame(self, matrix):
"""Convert 2D string lattice to data frame."""
# The input matrix is a string lattice with data points marked
# with + and - (2-class model), or with integers (multi-class).
# Any other characters are ignored.
# The lattice's center is taken as the origin.
#
# return: Frame with the positions and values converted to
# SVM input requirements.
# This frame is ready as input to train, test, or predict.
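        # A small hedged illustration of the mapping (hand-derived from the
        # rules above rather than taken from a run): the 2x2 lattice
        # ["+-", "-+"] has its origin at column 1 of row 1, so it produces the
        # sorted (x, y, model_class) rows
        #     [-1, 0, 0], [-1, 1, 1], [0, 0, 1], [0, 1, 0]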
block_data = []
schema = [('x', float),
('y', float),
('model_class', int)]
# Grabbing center column from center row allows for a skew matrix,
# so long as the center row is complete.
origin_y = len(matrix)/2
origin_x = len(matrix[origin_y])/2
# print "L2M TRACE", matrix, "origin at", origin_x, origin_y
for y in range(len(matrix)):
for x in range(len(matrix[y])):
svm_class = None
char = matrix[y][x]
if char == '+':
svm_class = 1
elif char == '-':
svm_class = 0
elif char.isdigit():
svm_class = int(char)
if svm_class is not None:
block_data.append([x-origin_x, origin_y-y, svm_class])
block_data.sort()
if len(block_data) == 0:
frame = None
else:
frame = self.context.frame.create(block_data, schema=schema)
return frame
def test_model_scoring(self):
""" Verify that SvmModel operates as expected. """
# Test set is a 3x3 square lattice of points
# with a fully accurate, linear, unbiased divider.
train_lattice = ["+++",
"++-",
"---"]
training_frame = self.lattice2frame(train_lattice)
svm_model = self.context.models.classification.svm.train(
training_frame, ["x", "y"], u"model_class")
file_name = self.get_name("svm")
model_path = svm_model.export_to_mar(self.get_export_file(file_name))
test_rows = training_frame.to_pandas(training_frame.count())
with scoring_utils.scorer(
model_path, self.id()) as scorer:
for _, i in test_rows.iterrows():
res = scorer.score([dict(zip(["x", "y"], list(i[0:2])))])
self.assertEqual(i[2], res.json()["data"][0]['Prediction'])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
quheng/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0 # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
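    # For reference (a standard identity rather than something asserted by this
    # file): with 1_n the n x n matrix whose entries are all 1/n, centering the
    # kernel K = X X^T in feature space amounts to
    #     K_centered = K - 1_n K - K 1_n + 1_n K 1_n
    # which is the quantity the dot products of the mean-centered X below are
    # expected to reproduce.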
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/calibration/plot_calibration_multiclass.py | 1 | 7780 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
# plt.show()
pltshow(plt)
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
    'width' or 'height', the arrow size increases with the width and
    height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
    Specifies the color of all parts of the barb except any flags. This
    parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
    analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line; there
is, of course, at most one half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
        #Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (ie. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
        barb. *empty_flag* is an array of flags to easily tell if
        a barb is empty (too low to plot any barbs/flags).
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
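    # Worked example for _find_tails (illustrative): with the default
    # increments half=5, full=10, flag=50, a magnitude of 65 decomposes as
    # one flag (50) + one full barb (10) + one half barb (5), so
    # _find_tails(np.array([65.])) returns roughly
    # (array([1]), array([1]), array([ True]), array([False])).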
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
        *nflags*, *nbarbs*, and *half_barb*, *empty_flag* are,
        respectively, the number of flags, number of barbs, flag for
        half a barb, and flag for empty barb, ostensibly obtained
        from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
        'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
            - *height*: distance from shaft to top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
        hemisphere).
        This function returns a list of arrays of vertices, defining a polygon for
each of the wind barbs. These polygons have been rotated to properly
align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking since there 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
                #The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
#Add vertices for each barb. These really are lines, but works
#great adding 3 vertices that basically pull the polygon out and
#back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
#Rotate the barb according the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
        Set the offsets for the barb polygons. This saves the offsets passed in
        and actually sets a version masked as appropriate for the existing U/V
data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| agpl-3.0 |
AnthonyHullDiamond/scanning | org.eclipse.scanning.points/scripts/scanpointgenerator/plotgenerator.py | 2 | 4453 | ###
# Copyright (c) 2016, 2017 Diamond Light Source Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Tom Cobb - initial API and implementation and/or initial documentation
# Gary Yendell - initial API and implementation and/or initial documentation
# Charles Mita - initial API and implementation and/or initial documentation
#
###
from scanpointgenerator import CompoundGenerator, RectangularROI, CircularROI
MARKER_SIZE = 10
def plot_generator(gen, excluder=None, show_indexes=True):
from matplotlib.patches import Rectangle, Circle
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
if excluder is not None:
for roi in excluder.rois:
overlay = plt.subplot(111, aspect='equal')
if isinstance(roi, RectangularROI):
overlay.add_patch(Rectangle(roi.start, roi.width, roi.height, fill=False))
if isinstance(roi, CircularROI):
overlay.add_patch(Circle(roi.centre, roi.radius, fill=False))
if not isinstance(gen, CompoundGenerator):
excluders = [] if excluder is None else [excluder]
gen = CompoundGenerator([gen], excluders, [])
gen.prepare()
# points for spline generation
x, y = [], []
# capture points and indexes
capx, capy, capi = [], [], []
# segment start for colour changing
starts = []
for point in gen.iterator():
# If lower is different from last then include it
xlower = point.lower["x"]
ylower = point.lower.get("y", 0)
if len(x) == 0 or x[-1] != xlower or y[-1] != ylower:
if len(x) != 0:
# add in a tiny fractional distance
xneg = x[-1] - xlower > 0
yneg = y[-1] - ylower > 0
xdiff = (x[-1] - x[-2]) * 0.01
ydiff = (y[-1] - y[-2]) * 0.01
for i in range(3):
x.append(x[-1] + xdiff)
y.append(y[-1] + ydiff)
# add the padding on the input
if xneg:
xdiff *= -1
if yneg:
ydiff *= -1
for i in reversed(range(3)):
x.append(xlower + xdiff * (i + 1))
y.append(ylower + ydiff * (i + 1))
starts.append(len(x))
x.append(xlower)
y.append(ylower)
# Add in capture points
xpos = point.positions["x"]
ypos = point.positions.get("y", 0)
x.append(xpos)
y.append(ypos)
capx.append(xpos)
capy.append(ypos)
capi.append(point.indexes)
# And upper point
starts.append(len(x))
x.append(point.upper["x"])
y.append(point.upper.get("y", 0))
# # Plot labels
plt.xlabel("X (%s)" % gen.units["x"])
if "y" in gen.units:
plt.ylabel("Y (%s)" % gen.units["y"])
else:
plt.tick_params(left='off', labelleft='off')
# Define curves parametrically
x = np.array(x)
y = np.array(y)
t = np.zeros(len(x))
t[1:] = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)
t = np.cumsum(t)
t /= t[-1]
tck, _ = interpolate.splprep([x, y], s=0)
# Plot each line
for i, start in enumerate(starts):
if i + 1 < len(starts):
end = starts[i+1]
else:
end = len(x) - 1
tnew = np.linspace(t[start], t[end], num=1001, endpoint=True)
sx, sy = interpolate.splev(tnew, tck)
plt.plot(sx, sy, linewidth=2)
# And the capture points
plt.plot(capx, capy, linestyle="", marker="x", color="k",
markersize=MARKER_SIZE)
# And a start position
plt.plot([x[0]], [y[0]], 'bo')
plt.annotate("Start", (x[0], y[0]), xytext=(MARKER_SIZE/2, MARKER_SIZE/2),
textcoords='offset points')
# And the indexes
if show_indexes:
for i, x, y in zip(capi, capx, capy):
plt.annotate(i, (x, y), xytext=(MARKER_SIZE/2, MARKER_SIZE/2),
textcoords='offset points')
#indexes = ["%s (size %d)" % z for z in zip(gen.index_names, gen.index_dims)]
#plt.title("Dataset: [%s]" % (", ".join(indexes)))
plt.show()
| epl-1.0 |
anshumang/picongpu-evpath | examples/ThermalTest/tools/dispersion.py | 11 | 2689 | #!/usr/bin/env python
#
# Copyright 2013 Heiko Burau, Axel Huebl
#
# This file is part of PIConGPU.
#
# PIConGPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PIConGPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PIConGPU.
# If not, see <http://www.gnu.org/licenses/>.
#
#___________P A R A M E T E R S___________
omega_plasma = 6.718e13 # SI unit: 1/s
v_th = 1.0e8 # SI unit: m/s
c = 2.9979e8 # SI unit: m/s
delta_t = 2.5e-15 # SI unit: s
delta_z = c * delta_t # SI unit: m
#_________________________________________
from numpy import *
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
data_trans = loadtxt("eField_zt_trans.dat")
data_long = loadtxt("eField_zt_long.dat")
N_z = len(data_trans[:,0])
N_t = len(data_trans[0,:])
omega_max = pi*(N_t-1)/(N_t*delta_t)/omega_plasma
k_max = pi * (N_z-1)/(N_z*delta_z)
# __________________transversal plot______________________
ax = plt.subplot(211, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_trans = fft.fftshift(fft.fft2(data_trans))
plt.imshow(abs(data_trans), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
x = linspace(-k_max, k_max, 200)
y = sqrt(c**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
# ___________________longitudinal plot_____________________
ax = plt.subplot(212, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_long = fft.fftshift(fft.fft2(data_long))
plt.imshow(abs(data_long), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
x = linspace(-k_max, k_max, 200)
y = sqrt(3 * v_th**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
plt.show()
| gpl-3.0 |
massmutual/scikit-learn | sklearn/discriminant_analysis.py | 3 | 27324 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
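# Illustrative usage of the shrinkage options described above (X stands for
# any (n_samples, n_features) array; these calls are just a sketch):
#
#     _cov(X)          # plain empirical covariance
#     _cov(X, 'auto')  # Ledoit-Wolf shrinkage
#     _cov(X, 0.1)     # fixed shrinkage intensity of 0.1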
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling with within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
sumspr/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
ovgu-FINken/paparazzi_pre_merge | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
utm.east = i * dist_points+ utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
prabhamatta/Analyzing-Open-Data | notebooks/Day_06_B_Generators.py | 3 | 10025 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Goals
# <markdowncell>
# **To practice using generators to yield geographical entities of various types.**
#
# Generators are a bit complicated, and I won't try to explain all the intricacies here. I will show you how to use `yield` in a function definition to return a generator. From [Definition of a generator](http://docs.python.org/2/glossary.html#term-generator):
#
# <blockquote>A function which returns an iterator. It looks like a normal function except that it contains yield statements for producing a series of values usable in a for-loop or that can be retrieved one at a time with the next() function. Each yield temporarily suspends processing, remembering the location execution state (including local variables and pending try-statements). When the generator resumes, it picks up where it left off (in contrast to functions which start fresh on every invocation)</blockquote>
#
# For some background on Python generators:
#
# * [iterator - The Python yield keyword explained - Stack Overflow](http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained/231855#231855)
# * [Improve Your Python: 'yield' and Generators Explained](http://www.jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/)
#
# Why use generators: http://stackoverflow.com/a/102632/7782
#
# <blockquote>Generators are good for calculating large sets of results (in particular calculations involving loops themselves) where you don't know if you are going to need all results, or where you don't want to allocate the memory for all results at the same time. </blockquote>
#
# Let's also practice using [itertools.islice](http://www.python.org/doc//current/library/itertools.html#itertools.islice) and [enumerate](http://docs.python.org/2/library/functions.html#enumerate) -- two of my favorite constructions in Python
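# <markdowncell>
# (Illustrative sketch, not part of the original notebook.) Before the census warmup below, here is a minimal `yield` example: each call to `next()` resumes the function body right after the last `yield`.
# <codecell>
def countdown(n):
    """count down from n to 1, yielding one value per next() call"""
    while n > 0:
        yield n
        n -= 1
print list(countdown(3))  # prints [3, 2, 1]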
# <markdowncell>
# From http://api.census.gov/data/2010/sf1/geo.html, geographic entities we are specifically interested in this exercise:
#
# * state-county
# * state-county-tract
#
# * state-place
# * state-metropolitan statistical area/micropolitan statistical area
# * state-metropolitan statistical area/micropolitan statistical area-metropolitan division
# * state-combined statistical area
# <codecell>
# usual imports for numpy, pandas, matplotlib
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# <codecell>
# check that CENSUS_KEY is defined
import census
import us
import settings
assert settings.CENSUS_KEY is not None
# <codecell>
# instantiate our Census object
c = census.Census(key=settings.CENSUS_KEY)
# <headingcell level=1>
# A bit of warmup with Generators
# <codecell>
import string
print list(string.lowercase)
# <codecell>
def abcs():
    """a generator that yields the letters of the alphabet, one at a time"""
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
                'w', 'x', 'y', 'z']
    for letter in alphabet:
        yield letter
# a generator that gives you the letters of the alphabet a letter at a time
say_abcs = abcs()
# <codecell>
# run this line over and over again to see the letters one at a time
say_abcs.next()
# <codecell>
# you can use list to grab all the items in an iterator. But be careful if the number
# of items is large or even infinite! In this case, we're ok
list(abcs())
# <markdowncell>
# Demonstration of how to use [enumerate](http://docs.python.org/2/library/functions.html#enumerate):
#
# <blockquote>Return an enumerate object. sequence must be a sequence, an iterator, or some other object which supports iteration. The next() method of the iterator returned by enumerate() returns a tuple containing a count (from start which defaults to 0) and the values obtained from iterating over sequence</blockquote>
#
# <codecell>
for (i, letter) in enumerate(abcs()):
print i, letter
# <markdowncell>
# You can use itertools.islice [itertools.islice](http://www.python.org/doc//current/library/itertools.html#itertools.islice) to return parts of the iterator.
#
# <blockquote>Make an iterator that returns selected elements from the iterable. If start is non-zero, then elements from the iterable are skipped until start is reached. Afterward, elements are returned consecutively unless step is set higher than one which results in items being skipped. If stop is None, then iteration continues until the iterator is exhausted, if at all; otherwise, it stops at the specified position. Unlike regular slicing, islice() does not support negative values for start, stop, or step. Can be used to extract related fields from data where the internal structure has been flattened (for example, a multi-line report may list a name field on every third line).</blockquote>
# <codecell>
# let's get the first 10 letters of the alphabet
from itertools import islice
list(islice(abcs(), 10))
# <codecell>
# you can use None to get all items in islice
# from docs: "If stop is None, then iteration continues until the iterator is exhausted,"
list(islice(abcs(), None))
# <codecell>
# itertools.count can in principle generate an infinite sequence
# http://www.python.org/doc//current/library/itertools.html#itertools.count
from itertools import count
# count starting zero
my_counter = count(0)
# <codecell>
# try it out
my_counter.next()
# <codecell>
# DON'T do list(count(0)) -> you'll be trying to generate an infinite list
# but use an upper limit
list(islice(count(0),10))
# <codecell>
# start, stop
list(islice(count(),1,3))
# <headingcell level=1>
# Generator for US Counties
# <codecell>
# get the syntax down for getting counties from CA -- so that we can use it later
r = c.sf1.get('NAME,P0010001', geo={'for':'county:*',
'in':'state:{fips}'.format(fips=us.states.CA.fips)})
r[:5]
# <markdowncell>
# With the census API, you can get the counties either with one single call or state-by-state. The `counties` generator below takes the first approach while `counties2` takes the second approach. Although `counties` is more efficient in most cases I can think of, it will be useful to know how to do calls on a state-by-state basis. For example, when we want to query at the census tract level or below, we will need to work on a state-by-state basis.
# <codecell>
def counties(variables='NAME'):
"""ask for all the states"""
# tabulate a set of fips codes for the states
states_fips = set([s.fips for s in us.states.STATES])
geo={'for':'county:*',
'in':'state:*'}
for county in c.sf1.get(variables, geo=geo):
        # keep only counties whose state FIPS code is in us.states.STATES (drops territories)
if county['state'] in states_fips:
yield county
def counties2(variables='NAME'):
"""generator for all counties"""
# since we can get all the counties in one call,
# this function is for demonstrating the use of walking through
# the states to get at the counties
for state in us.states.STATES:
geo={'for':'county:*',
'in':'state:{fips}'.format(fips=state.fips)}
for county in c.sf1.get(variables, geo=geo):
yield county
# <codecell>
counties_list = list(counties('NAME,P0010001'))
# <codecell>
# add up the population to make sure we have the total right
counties_df = DataFrame(counties_list)
counties_df.P0010001 = counties_df.P0010001.astype('int')
counties_df.P0010001.sum()
# <markdowncell>
# One reason for writing all the counties in the form of a Python generator is that you can easily control the number of counties we work with at any given time -- and then easily scale out to get all of them.
# <codecell>
# make a list of the first ten counties
from itertools import islice
list(islice(counties2(),10))
# <headingcell level=1>
# Generator for Census Tracts
# <markdowncell>
# The following generator loops through all the states, then through each state's counties, to get at the census tracts.
# <codecell>
def tracts(variables='NAME'):
for state in us.states.STATES:
# handy to print out state to monitor progress
print state.fips, state
counties_in_state={'for':'county:*',
'in':'state:{fips}'.format(fips=state.fips)}
for county in c.sf1.get('NAME', geo=counties_in_state):
# print county['state'], county['NAME']
tracts_in_county = {'for':'tract:*',
'in': 'state:{s_fips} county:{c_fips}'.format(s_fips=state.fips,
c_fips=county['county'])}
for tract in c.sf1.get(variables,geo=tracts_in_county):
yield tract
# <codecell>
r = list(islice(tracts('NAME,P0010001'),10))
tracts_df = DataFrame(r)
tracts_df.P0010001 = tracts_df.P0010001.astype('int')
tracts_df['FIPS'] = tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
print "number of tracts", len(tracts_df)
print "total pop", tracts_df.P0010001.sum()
tracts_df.head()
# <markdowncell>
# Good to save the DataFrame so we can load up the census tracts without having to call the census API again.
#
# I/O: http://pandas.pydata.org/pandas-docs/dev/io.html
#
# Today, we'll use [pickle format](http://docs.python.org/2/library/pickle.html) and look at other formats.
# <codecell>
TRACT_FILE_PICKLE = "tracts.pickle"
# UNCOMMENT THIS LINE TO SAVE YOUR FILE
# tracts_df.to_pickle(TRACT_FILE_PICKLE)
# <markdowncell>
# Let's read the DataFrame from disk to confirm that we were able to save the file properly.
# <codecell>
df = pd.read_pickle(TRACT_FILE_PICKLE)
df.head()
# <codecell>
# UNCOMMENT TO DO COMPARISON
# you can compare the saved file to the file from disk
# np.all(tracts_df == df)
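# <markdowncell>
# (Illustrative sketch, not part of the original notebook.) The same DataFrame could also be saved in one of the other formats covered by the pandas I/O docs linked above, for example CSV; the file name below is just an example.
# <codecell>
TRACT_FILE_CSV = "tracts.csv"
# UNCOMMENT THESE LINES TO SAVE AND RELOAD AS CSV
# tracts_df.to_csv(TRACT_FILE_CSV, index=False)
# pd.read_csv(TRACT_FILE_CSV).head()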
| apache-2.0 |
billy-inn/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 22 | 18104 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert_equal(X_means.dtype, output_dtype)
            assert_equal(X_vars.dtype, output_dtype)
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis, last_mean,
last_var, last_n)
assert_equal(X_means_incr.dtype, output_dtype)
assert_equal(X_vars_incr.dtype, output_dtype)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
for index_dtype in [np.int32, np.int64]:
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if index_dtype is np.int64:
X_csr.indptr = X_csr.indptr.astype(index_dtype)
X_csr.indices = X_csr.indices.astype(index_dtype)
assert X_csr.indices.dtype == index_dtype
assert X_csr.indptr.dtype == index_dtype
inplace_csr_row_normalize(X_csr)
assert_equal(X_csr.dtype, dtype)
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
| bsd-3-clause |
mblondel/scikit-learn | sklearn/utils/tests/test_utils.py | 23 | 6045 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
"""Check the check_random_state utility function behavior"""
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
"""Border case not worth mentioning in doctests"""
assert_true(resample() is None)
def test_deprecated():
"""Test whether the deprecated decorator issues appropriate warnings"""
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
| bsd-3-clause |
mjvakili/ccppabc | ccppabc/code/archive/wp_covariance.py | 1 | 1717 | from halotools.empirical_models import Zheng07 , model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np
model = Zheng07()
xir = []
for i in range(500):
model.populate_mock()
xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
np.savetxt("clustering_covariance_Mr20.dat" , covar)
"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a
rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.
cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)
pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
key1='x', key2='y', key3='z', mask=mask,
return_complement=False)
figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')
for n_pbins in np.array([2,8,16]):
p_bins = np.linspace(0 , l/2 , n_pbins)
a = time.time()
clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
print time.time() - a
plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)
plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
| mit |
dgrtwo/gleam | examples/baseball.py | 1 | 2364 | import os
from collections import OrderedDict
from flask import Flask
from wtforms import fields
from ggplot import (aes, stat_smooth, geom_point, geom_text, ggtitle, ggplot,
xlab, ylab)
import numpy as np
import pandas as pd
from gleam import Page, panels
# setup
stats = ['At-Bats (AB)', 'Runs (R)', 'Hits (H)', 'Doubles (2B)',
'Triples (3B)', 'Home Runs (HR)', 'Runs Batted In (RBI)',
'Stolen Bases (SB)', 'Caught Stealing (CS)', 'Walks (BB)',
'Intentional Walk (IBB)', 'Salary', 'Attendance']
statchoices = [(s, s) for s in stats]
dir = os.path.split(__file__)[0]
players = pd.read_csv(os.path.join(dir, "baseball_data", "players.csv"))
teams = pd.read_csv(os.path.join(dir, "baseball_data", "teams.csv"))
class BaseballInput(panels.InputPanel):
xvar = fields.SelectField(label="X axis", choices=statchoices,
default="Hits (H)")
yvar = fields.SelectField(label="Y axis", choices=statchoices,
default="Runs (R)")
year = fields.IntegerField(label="Year", default=2013)
linear = fields.BooleanField(label="Linear Fit")
shownames = fields.BooleanField(label="Show Names")
class DataScatter(panels.PlotPanel):
height = 500
width = 700
def __init__(self, name, dat, ID_col):
self.name = name
self.dat = dat
self.ID_col = ID_col
panels.PlotPanel.__init__(self)
def plot(self, inputs):
"""Plot the given X and Y axes on a scatter plot"""
if inputs.year not in self.dat.Year.values:
return
if inputs.xvar not in self.dat or inputs.yvar not in self.dat:
return
subdat = self.dat[self.dat.Year == inputs.year]
p = ggplot(subdat, aes(x=inputs.xvar, y=inputs.yvar))
p = p + geom_point()
if inputs.shownames:
p = p + geom_text(aes(label=self.ID_col), vjust=1, hjust=1)
if inputs.linear:
p = p + stat_smooth(color="red", method="lm")
return p
class BaseballGleam(Page):
title = "Baseball Statistics"
input = BaseballInput()
output = panels.TabPanel([DataScatter("Teams", teams, "teamID"),
DataScatter("Players", players, "name")])
app = Flask("BaseballGleam")
BaseballGleam.add_flask(app)
app.debug = True
app.run()
| mit |
ywang037/delta-ntu-slerp4 | Training/train_mobilenet_casia_1771.py | 1 | 7420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 14:47:47 2017
@author: slerp4
Compared with _debug version, this version excludes RMSprop optimizer
"""
#import tensorflow as tf
from keras import backend as K
from keras.applications.mobilenet import MobileNet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.callbacks import LearningRateScheduler, CSVLogger
import os, importlib
from timeit import default_timer as timer
import datetime
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
# check and set tensorflow as backend
if K.backend() != 'tensorflow':
os.environ['KERAS_BACKEND'] = 'tensorflow'
importlib.reload(K)
assert K.backend() == 'tensorflow'
print('{} backend is sucessfully set'.format(K.backend()))
elif K.backend() == 'tensorflow':
print('{} backend has already been set'.format(K.backend()))
# force to use gpu:0 tesla k20c
# Creates a graph.
with tf.device('/device:GPU:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
# training hyper parameters
train_data_dir = r'.\Datasets\casia-1771'  # raw string so the backslashes are kept literally
numclass = 1771
num_train_samples = 233505
batch_size = 64
#epochs = 100
alpha = 0.5 # choices=[0.25, 0.5, 0.75, 1.0]
inputsize = 224 # choices=[128, 160, 192, 224], >=32 is ok
'''
# define step decay function - used to visualize learning rate change
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.lr.append(step_decay(len(self.losses)))
print('Current learning rate:', step_decay(len(self.losses)))
'''
# learning rate schedule
def step_decay(epoch):
# initial_lrate = 0.01
drop = 0.5
epochs_drop = 20.0
lrate = init_lr * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
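# (illustrative note, not part of the original script) with drop=0.5 and
# epochs_drop=20.0 the schedule above halves the rate roughly every 20 epochs;
# e.g. with init_lr = 0.01: step_decay(0) == 0.01, step_decay(19) == 0.005,
# step_decay(39) == 0.0025 (the 1+epoch offset makes each drop land at epochs 19, 39, ...)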
# Setup the model
# using CASIA-WebFaces dataset for training, 10575 identities in total
model = MobileNet(alpha=alpha, depth_multiplier=1, dropout=1e-3,
include_top=True, weights=None, input_tensor=None, pooling=None, classes=numclass)
model.summary()
print('\nPrepare to train cnn model {}-MobileNet-224 with top layer included'.format(alpha))
#print('Total classes: {}'.format(numclass))
#print('Training samples: {}'.format(num_train_samples))
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
while optimizer_chosen not in ['A', 'B']:
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
epochs = int(input('Number of epochs? '))
while epochs < 0:
epochs = int(input('Use a positive integer as the number of epochs: '))
init_lr = float(input('Initial learning rate? '))
while init_lr < 0 or init_lr>0.2:
init_lr = float(input('Use a learning rate in [0, 0.2]: '))
# preparing training data
print('\nDataset path: '+ train_data_dir)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# load training and testing data
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(224, 224),
batch_size=batch_size)
# define the format of names of several files
stamp = str(alpha)+'-mobilenet-'+str(inputsize)+'-c{}-'.format(numclass)+'b{}-'.format(batch_size)+'e{}-'.format(epochs)
if optimizer_chosen == 'A':
# using step-decaying sgd
method = 'SGD'
print('\nUsing step-decaying stochastic gradient descent')
    print('learning rate halves every 20 epochs')
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
'''
# use following scripts to have learning rate displayed
# learning schedule callback
loss_history = LossHistory()
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [loss_history, lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=2)
'''
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=1)
elif optimizer_chosen == 'B':
# using adam update as adaptive learning rate method
method = 'Adam'
    print('\nUsing Adam update as the adaptive learning rate method')
adam = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # original lr=0.001
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
epochs=epochs, validation_data=None, callbacks=[csv_logger], verbose=1)
train_end = timer()
mins, secs = divmod(train_end-train_start,60)
hour, mins = divmod(mins,60)
print('Training process took %d:%02d:%02d' % (hour,mins,secs))
# set a stamp of file name for saving the record and weights
now = datetime.datetime.now() #current date and time
save_name = record +'-'+now.strftime("%Y%m%d-%H%M")
#print(history.history)
print(history.history.keys())
# print plots of acc and loss in one pdf
pp = PdfPages(save_name +'.pdf')
# summarize history for accuracy
plt.plot(history.history['acc']) # plt.plot(history.history['val_acc'])
plt_title = str(alpha)+'-mobilenet-'+str(inputsize)+' trained on small dataset'
plt_legend = method + ', {} classes'.format(numclass)+', batch size ={}'.format(batch_size)
plt.title(plt_title)
plt.ylabel('Model accuracy')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='lower right')
pp.savefig()
plt.show()
# summarize history for loss
plt.plot(history.history['loss']) #plt.plot(history.history['val_loss'])
plt.title(plt_title)
plt.ylabel('Model loss')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='upper left') #plt.legend(['train', 'test'], loc='upper left')
pp.savefig()
plt.show()
pp.close()
# save trained weights
model.save_weights(save_name +'.h5')
| mit |
endolith/scipy | scipy/stats/stats.py | 5 | 316939 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._util import (check_random_state, MapWrapper,
rng_integers, float_factorial)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import make_dataclass
from ._hypotests import _all_partitions
# Functions/classes in other files should be added in `__init__.py`, not here
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'F_onewayConstantInputWarning',
'F_onewayBadInputSizesWarning',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact',
'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern']
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _shape_with_dropped_axis(a, axis):
"""
Given an array `a` and an integer `axis`, return the shape
of `a` with the `axis` dimension removed.
Examples
--------
>>> a = np.zeros((3, 5, 2))
>>> _shape_with_dropped_axis(a, 1)
(3, 2)
"""
shp = list(a.shape)
try:
del shp[axis]
except IndexError:
raise np.AxisError(axis, a.ndim) from None
return tuple(shp)
def _broadcast_shapes(shape1, shape2):
"""
Given two shapes (i.e. tuples of integers), return the shape
that would result from broadcasting two arrays with the given
shapes.
Examples
--------
>>> _broadcast_shapes((2, 1), (4, 1, 3))
(4, 2, 3)
"""
d = len(shape1) - len(shape2)
if d <= 0:
shp1 = (1,)*(-d) + shape1
shp2 = shape2
else:
shp1 = shape1
shp2 = (1,)*d + shape2
shape = []
for n1, n2 in zip(shp1, shp2):
if n1 == 1:
n = n2
elif n2 == 1 or n1 == n2:
n = n1
else:
raise ValueError(f'shapes {shape1} and {shape2} could not be '
'broadcast together')
shape.append(n)
return tuple(shape)
def _broadcast_shapes_with_dropped_axis(a, b, axis):
"""
Given two arrays `a` and `b` and an integer `axis`, find the
shape of the broadcast result after dropping `axis` from the
shapes of `a` and `b`.
Examples
--------
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((1, 9, 3))
>>> _broadcast_shapes_with_dropped_axis(a, b, 1)
(5, 3)
"""
shp1 = _shape_with_dropped_axis(a, axis)
shp2 = _shape_with_dropped_axis(b, axis)
try:
shp = _broadcast_shapes(shp1, shp2)
except ValueError:
raise ValueError(f'non-axis shapes {shp1} and {shp2} could not be '
'broadcast together') from None
return shp
def gmean(a, axis=0, dtype=None, weights=None):
"""Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
References
----------
.. [1] "Weighted Geometric Mean", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
return np.exp(np.average(log_a, axis=axis, weights=weights))
def hmean(a, axis=0, dtype=None):
"""Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a >= 0):
        # Harmonic mean only defined if greater than or equal to zero.
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
with np.errstate(divide='ignore'):
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array,
# casting types in the process.
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
in_dims = list(range(a.ndim))
a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.empty(a_view.shape[:-1], dtype=np.int_)
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
    `tsem` uses the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
        observations; no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
moment_shape = list(a.shape)
del moment_shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
# empty array, return nan(s) with shape matching `moment`
out_shape = (moment_shape if np.isscalar(moment)
else [len(moment)] + moment_shape)
if len(out_shape) == 0:
return dtype(np.nan)
else:
return np.full(out_shape, np.nan, dtype=dtype)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mean = a.mean(axis, keepdims=True)
mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
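# Illustrative sketch (hypothetical helper, not part of the API): the
# exponentiation-by-squares loop above is just an efficient way of evaluating
# mean((a - mean(a))**k). The check below compares it against the direct
# formula for a few orders; `np` is assumed to be the module-level numpy.
def _demo_moment_check(a=(1., 2., 3., 4., 5.), orders=(2, 3, 4), axis=0):
    a = np.asarray(a, dtype=float)
    direct = [np.mean((a - a.mean(axis, keepdims=True))**k, axis=axis)
              for k in orders]
    fast = [_moment(a, k, axis) for k in orders]
    return np.allclose(direct, fast)           # expected: True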
def variation(a, axis=0, nan_policy='propagate', ddof=0):
"""Compute the coefficient of variation.
The coefficient of variation is the standard deviation divided by the
mean. This function is equivalent to::
        np.std(x, axis=axis, ddof=ddof) / np.mean(x, axis=axis)
The default for ``ddof`` is 0, but many definitions of the coefficient
of variation use the square root of the unbiased sample variance
for the sample standard deviation, which corresponds to ``ddof=1``.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
ddof : int, optional
Delta degrees of freedom. Default is 0.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5])
0.47140452079103173
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis, ddof)
return a.std(axis, ddof=ddof) / a.mean(axis)
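# Illustrative sketch (hypothetical helper): `variation` is simply the standard
# deviation divided by the mean along the requested axis, as the docstring
# states; `np` is assumed to be the module-level numpy.
def _demo_variation_identity(x=(1., 2., 3., 4., 5.), ddof=0):
    x = np.asarray(x, dtype=float)
    return np.isclose(variation(x, ddof=ddof),
                      np.std(x, ddof=ddof) / np.mean(x))   # expected: True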
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m3 / m2**1.5)
if not bias:
can_correct = ~zero & (n > 2)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
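# Illustrative sketch (hypothetical helper): with the default ``bias=True`` the
# value returned by `skew` is the Fisher-Pearson coefficient m3 / m2**1.5 built
# from plain 1/N central moments; `np` is assumed to be the module-level numpy.
def _demo_skew_identity(x=(2., 8., 0., 4., 1., 9., 9., 0.)):
    x = np.asarray(x, dtype=float)
    d = x - x.mean()
    m2, m3 = np.mean(d**2), np.mean(d**3)
    return np.isclose(skew(x), m3 / m2**1.5)   # expected: True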
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
    Use `kurtosistest` to see if the result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
    In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m4 / m2**2.0)
if not bias:
can_correct = ~zero & (n > 3)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected
for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, the length along each axis
slice is counted separately.
    minmax : tuple of ndarrays or floats
Minimum and maximum value of `a` along the given axis.
mean : ndarray or float
Arithmetic mean of `a` along the given axis.
variance : ndarray or float
Unbiased variance of `a` along the given axis; denominator is number
of observations minus one.
skewness : ndarray or float
Skewness of `a` along the given axis, based on moment calculations
with denominator equal to the number of observations, i.e. no degrees
of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher) of `a` along the given axis. The kurtosis is
normalized so that it is zero for the normal distribution. No
degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
variance=9.166666666666666, skewness=0.0,
kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
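# Illustrative sketch (hypothetical helper): the three `alternative` branches
# above correspond to the lower tail, the upper tail and twice the smaller tail
# of the standard normal; `distributions` is the scipy.stats distributions
# module already used in this file.
def _demo_normtest_finish_tails(z=1.5):
    less = distributions.norm.cdf(z)             # P(Z <= z)
    greater = distributions.norm.sf(z)           # P(Z >= z)
    two_sided = 2 * distributions.norm.sf(abs(z))
    return less, greater, two_sided              # two_sided == 2 * greater for z > 0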
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis, alternative)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(*_normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> kurtosistest(list(range(20)), alternative='less')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
>>> kurtosistest(list(range(20)), alternative='greater')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
>>> rng = np.random.default_rng()
>>> s = rng.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis, alternative)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(*_normtest_finish(Z, alternative))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> pts = 1000
>>> a = rng.normal(0, 1, size=pts)
>>> b = rng.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 8.4713e-19
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
Jarque_beraResult = namedtuple('Jarque_beraResult', ('statistic', 'pvalue'))
def jarque_bera(x):
"""Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = rng.normal(0, 1, 100000)
>>> jarque_bera_test = stats.jarque_bera(x)
>>> jarque_bera_test
Jarque_beraResult(statistic=3.3415184718131554, pvalue=0.18810419594996775)
>>> jarque_bera_test.statistic
3.3415184718131554
>>> jarque_bera_test.pvalue
0.18810419594996775
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return Jarque_beraResult(jb_value, p)
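# Illustrative sketch (hypothetical helper): the Jarque-Bera statistic above can
# equivalently be written in terms of the biased sample skewness and the
# Pearson (non-excess) kurtosis computed by this module; `np` is the
# module-level numpy.
def _demo_jarque_bera_identity(x=(2., 8., 0., 4., 1., 9., 9., 0.)):
    x = np.asarray(x, dtype=float)
    n = x.size
    s = skew(x)                                  # biased skewness, m3 / m2**1.5
    k = kurtosis(x, fisher=False)                # Pearson kurtosis, m4 / m2**2
    jb = n / 6 * (s**2 + (k - 3)**2 / 4)
    return np.isclose(jb, jarque_bera(x).statistic)   # expected: True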
#####################################
# FREQUENCY FUNCTIONS #
#####################################
# deindent to work around numpy/gh-16202
@np.deprecate(
message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Therefore, users with numpy >= 1.9 are recommended to use
    `numpy.percentile`.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
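# Illustrative sketch (hypothetical helper): for the 'fraction' method the
# target index is idx = per / 100 * (n - 1); when idx falls between the
# integers i and j = i + 1 the score is the weighted average
# (j - idx) * sorted[i] + (idx - i) * sorted[j], exactly as computed above.
# `np` is assumed to be the module-level numpy.
def _demo_qth_percentile():
    sorted_ = np.array([10., 20., 30., 40., 50.])
    per = 30                                     # idx = 0.3 * 4 = 1.2
    idx = per / 100. * (sorted_.size - 1)
    i = int(np.floor(idx))
    j = i + 1
    by_hand = (j - idx) * sorted_[i] + (idx - i) * sorted_[j]
    via_helper = _compute_qth_percentile(sorted_, per, 'fraction', axis=0)
    return by_hand, via_helper                   # both evaluate to 22.0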
def percentileofscore(a, score, kind='rank'):
"""Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score)
+ np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
    default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
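# Illustrative sketch (hypothetical helper): with no `defaultlimits` the bin
# range is widened by s = (max - min) / (2 * (numbins - 1)) on each side, so
# every data point falls inside the first or last bin; `np` is the
# module-level numpy.
def _demo_histogram_limits():
    a = np.array([1., 2., 3., 4., 5.])
    numbins = 5
    s = (a.max() - a.min()) / (2. * (numbins - 1.))   # 0.5
    res = _histogram(a, numbins=numbins)
    return s, res.lowerlimit, res.binsize, res.count
    # -> (0.5, 0.5, 1.0, array([1., 1., 1., 1., 1.]))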
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
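# Illustrative sketch (hypothetical helper): the defining property of the
# O'Brien transform is that the mean of the transformed values equals the
# unbiased sample variance of the original data, which is exactly what the
# TINY check above verifies; `np` is the module-level numpy.
def _demo_obrientransform_property(x=(10, 11, 13, 9, 7, 12, 12, 9, 10)):
    x = np.asarray(x, dtype=float)
    t, = obrientransform(x)
    return np.isclose(np.mean(t), np.var(x, ddof=1))   # expected: True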
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different to the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the z-scores computed for the non-nan values.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
An example with `nan_policy='omit'`:
>>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
... [14.95, 16.06, 121.25, 94.35, 29.81]])
>>> stats.zscore(x, axis=1, nan_policy='omit')
array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602],
[-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]])
"""
return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle the occurrence of nans in `compare`.
'propagate' returns nan, 'raise' raises an exception, 'omit'
performs the calculations ignoring nan values. Default is
'propagate'. Note that when the value is 'omit', nans in `scores`
also propagate to the output, but they do not affect the z-scores
computed for the non-nan values.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
a = np.asanyarray(compare)
if a.size == 0:
return np.empty(a.shape)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if axis is None:
mn = _quiet_nanmean(a.ravel())
std = _quiet_nanstd(a.ravel(), ddof=ddof)
isconst = _isconst(a.ravel())
else:
mn = np.apply_along_axis(_quiet_nanmean, axis, a)
std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
isconst = np.apply_along_axis(_isconst, axis, a)
else:
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
if axis is None:
isconst = (a.item(0) == a).all()
else:
isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (scores - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array, a float is returned.
Notes
-----
As the calculation requires the use of logarithms the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` is different to the default value (0) used
by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> from scipy.stats import gstd
>>> rng = np.random.default_rng()
>>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.810010162475324
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.'
) from w
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.'
) from w
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w) from w
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError as e:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types') from e
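# Illustrative sketch, not part of the public API: a hypothetical helper
# verifying the definition in the docstring above, gstd = exp(std(log(a)))
# with ddof=1 by default, on strictly positive data. Assumes the
# module-level numpy import (`np`).
def _demo_gstd_sketch():
    a = np.array([1.0, 2.0, 4.0, 8.0])
    manual = np.exp(np.std(np.log(a), ddof=1))
    assert np.isclose(gstd(a), manual)
    return manual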
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
* 'raw' : No scaling, just return the raw IQR.
**Deprecated!** Use `scale=1` instead.
* 'normal' : Scale by
:math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 1.0. The use of scale='raw' is deprecated.
Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : {'linear', 'lower', 'higher', 'midpoint',
'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`.
The following options are available (default is 'linear'):
* 'linear': `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower': `i`.
* 'higher': `j`.
* 'nearest': `i` or `j` whichever is nearest.
* 'midpoint': `(i + j) / 2`.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
if scale_key == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
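# Illustrative sketch, not part of the public API: a hypothetical helper
# showing that `iqr` is the difference of the 75th and 25th percentiles,
# and that scale='normal' divides by ~1.349 so the result estimates the
# standard deviation of normally distributed data.
def _demo_iqr_sketch():
    x = np.array([7.0, 1.0, 2.0, 4.0, 10.0, 3.0])
    q75, q25 = np.percentile(x, [75, 25])
    assert np.isclose(iqr(x), q75 - q25)
    assert np.isclose(iqr(x, scale='normal'), (q75 - q25) / 1.3489795)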
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.5.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the
function signature ``func(arr, axis)``.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The default is 1.0. The string "normal" is also accepted,
and results in `scale` being the inverse of the standard normal
quantile function at 0.75, which is approximately 0.67449.
Array-like scale is also allowed, as long as it broadcasts correctly
to the output such that ``out / scale`` is a valid operation. The
output dimensions depend on the input array, `x`, and the `axis`
argument.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
The input array may contain `inf`, but if `center` returns `inf`, the
corresponding MAD for that data will be `nan`.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_abs_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_abs_deviation(x)
0.82832610097857
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_abs_deviation(x)
0.8323442311590675
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_abs_deviation(x)
array([3.5, 2.5, 1.5])
>>> stats.median_abs_deviation(x, axis=None)
2.0
Scale normal example:
>>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
>>> stats.median_abs_deviation(x)
1.3487398527041636
>>> stats.median_abs_deviation(x, scale='normal')
1.9996446978061115
"""
if not callable(center):
raise TypeError("The argument 'center' must be callable. The given "
f"value {repr(center)} is not callable.")
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
if scale.lower() == 'normal':
scale = 0.6744897501960817 # special.ndtri(0.75)
else:
raise ValueError(f"{scale} is not a valid scale value.")
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
if axis is None:
return np.nan
nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
if nan_shape == ():
# Return nan, not array(nan)
return np.nan
return np.full(nan_shape, np.nan)
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan:
if axis is None:
mad = _mad_1d(x.ravel(), center, nan_policy)
else:
mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
else:
if axis is None:
med = center(x, axis=None)
mad = np.median(np.abs(x - med))
else:
# Wrap the call to center() in expand_dims() so it acts like
# keepdims=True was used.
med = np.expand_dims(center(x, axis=axis), axis)
mad = np.median(np.abs(x - med), axis=axis)
return mad / scale
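# Illustrative sketch, not part of the public API: a hypothetical helper
# verifying that the MAD is median(|x - median(x)|), optionally rescaled by
# ~0.67449 when scale='normal' so it estimates a normal standard deviation.
def _demo_mad_sketch():
    x = np.array([1.0, 1.0, 2.0, 2.0, 4.0, 6.0, 9.0])
    manual = np.median(np.abs(x - np.median(x)))
    assert np.isclose(median_abs_deviation(x), manual)
    assert np.isclose(median_abs_deviation(x, scale='normal'),
                      manual / 0.6744897501960817)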
# Keep the top newline so that the message does not show up on the stats page
_median_absolute_deviation_deprec_msg = """
To preserve the existing default behavior, use
`scipy.stats.median_abs_deviation(..., scale=1/1.4826)`.
The value 1.4826 is not numerically precise for scaling
with a normal distribution. For a numerically precise value, use
`scipy.stats.median_abs_deviation(..., scale='normal')`.
"""
# Due to numpy/gh-16349 we need to unindent the entire docstring
@np.deprecate(old_name='median_absolute_deviation',
new_name='median_abs_deviation',
message=_median_absolute_deviation_deprec_msg)
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the function
signature ``func(arr, axis)``.
scale : int, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
if isinstance(scale, str):
if scale.lower() == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = 1.0
if not isinstance(scale, str):
scale = 1 / scale
return median_abs_deviation(x, axis=axis, center=center, scale=scale,
nan_policy=nan_policy)
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the input array `c` that satisfy either of
the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
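# Illustrative sketch, not part of the public API: a hypothetical helper
# showing that `sigmaclip` iterates until every surviving element lies
# inside the final [lower, upper] band, discarding gross outliers.
def _demo_sigmaclip_sketch():
    a = np.concatenate((np.full(50, 10.0), [0.0, 100.0]))
    clipped, lower, upper = sigmaclip(a, low=3.0, high=3.0)
    assert ((clipped >= lower) & (clipped <= upper)).all()
    return clipped.size  # both outliers are removed in this example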
def trimboth(a, proportiontocut, axis=0):
"""Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim off each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
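# Illustrative sketch, not part of the public API: a hypothetical helper
# showing that `trimboth` drops int(proportiontocut * nobs) values from
# each end (here 2 from each end of 10 values), in unspecified order.
def _demo_trimboth_sketch():
    a = np.arange(10)
    b = trimboth(a, 0.2)
    assert b.shape == (6,)
    assert sorted(b.tolist()) == [2, 3, 4, 5, 6, 7]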
def trim1(a, proportiontocut, tail='right', axis=0):
"""Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.5, 'left')
>>> b
array([10, 11, 12, 13, 14, 16, 15, 17, 18, 19])
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
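# Illustrative sketch, not part of the public API: a hypothetical helper
# showing that `trim1` removes values from only one tail, chosen by `tail`.
def _demo_trim1_sketch():
    a = np.arange(10)
    right = trim1(a, 0.3, tail='right')  # drops the 3 largest values
    left = trim1(a, 0.3, tail='left')    # drops the 3 smallest values
    assert sorted(right.tolist()) == [0, 1, 2, 3, 4, 5, 6]
    assert sorted(left.tolist()) == [3, 4, 5, 6, 7, 8, 9]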
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
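# Illustrative sketch, not part of the public API: a hypothetical helper
# showing that `trim_mean` averages only the central portion of the data,
# so a single extreme value no longer dominates the result.
def _demo_trim_mean_sketch():
    x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    # 20% trimmed from each tail drops 1 and 100, leaving mean([2, 3, 4])
    assert np.isclose(trim_mean(x, 0.2), 3.0)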
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array.
"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
self.args = (msg,)
class F_onewayBadInputSizesWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input has length 0,
or if all the inputs have length 1.
"""
pass
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
def f_oneway(*args, axis=0):
"""Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments. If the arrays are multidimensional, then all the
dimensions of the array must be the same except for `axis`.
axis : int, optional
Axis of the input arrays along which the test is applied.
Default is 0.
Returns
-------
statistic : float
The computed F statistic of the test.
pvalue : float
The associated p-value from the F distribution.
Warns
-----
F_onewayConstantInputWarning
Raised if each of the input arrays is a constant array.
In this case the F statistic is either infinite or isn't defined,
so ``np.inf`` or ``np.nan`` is returned.
F_onewayBadInputSizesWarning
Raised if the length of any input array is 0, or if all the input
arrays have length 1. ``np.nan`` is returned for the F statistic
and the p-value in these cases.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still
be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
some loss of power.
The length of each group must be at least one, and there must be at
least one group with length greater than one. If these conditions
are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
is returned.
If each group contains constant values, and there exist at least two
groups with different values, the function generates a warning and
returns (``np.inf``, 0).
If all values in all groups are the same, the function generates a warning
and returns (``np.nan``, ``np.nan``).
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> from scipy.stats import f_oneway
Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
`f_oneway` accepts multidimensional input arrays. When the inputs
are multidimensional and `axis` is not given, the test is performed
along the first axis of the input arrays. For the following data, the
test is performed three times, once for each column.
>>> a = np.array([[9.87, 9.03, 6.81],
... [7.18, 8.35, 7.00],
... [8.39, 7.58, 7.68],
... [7.45, 6.33, 9.35],
... [6.41, 7.10, 9.33],
... [8.00, 8.24, 8.44]])
>>> b = np.array([[6.35, 7.30, 7.16],
... [6.65, 6.68, 7.63],
... [5.72, 7.73, 6.72],
... [7.01, 9.19, 7.41],
... [7.75, 7.87, 8.30],
... [6.90, 7.97, 6.97]])
>>> c = np.array([[3.31, 8.77, 1.01],
... [8.25, 3.24, 3.62],
... [6.32, 8.81, 5.19],
... [7.48, 8.83, 8.91],
... [8.59, 6.01, 6.07],
... [3.07, 9.72, 7.48]])
>>> F, p = f_oneway(a, b, c)
>>> F
array([1.75676344, 0.03701228, 3.76439349])
>>> p
array([0.20630784, 0.96375203, 0.04733157])
"""
if len(args) < 2:
raise TypeError(f'at least two inputs are required; got {len(args)}.')
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
# We haven't explicitly validated axis, but if it is bad, this call of
# np.concatenate will raise np.AxisError. The call will raise ValueError
# if the dimensions of all the arrays, except the axis dimension, are not
# the same.
alldata = np.concatenate(args, axis=axis)
bign = alldata.shape[axis]
# Check this after forming alldata, so shape errors are detected
# and reported before checking for 0 length inputs.
if any(arg.shape[axis] == 0 for arg in args):
warnings.warn(F_onewayBadInputSizesWarning('at least one input '
'has length 0'))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Must have at least one group with length greater than 1.
if all(arg.shape[axis] == 1 for arg in args):
msg = ('all input arrays have length 1. f_oneway requires that at '
'least one input has length greater than 1.')
warnings.warn(F_onewayBadInputSizesWarning(msg))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Check if the values within each group are constant, and if the common
# value in at least one group is different from that in another group.
# Based on https://github.com/scipy/scipy/issues/11669
# If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
# then is_const is a boolean array with shape (num_groups, ...).
# It is True if the groups along the axis slice are each constant.
# In the typical case where each input array is 1-d, is_const is a
# 1-d array with length num_groups.
is_const = np.concatenate([(_first(a, axis) == a).all(axis=axis,
keepdims=True)
for a in args], axis=axis)
# all_const is a boolean array with shape (...) (see previous comment).
# It is True if the values within each group along the axis slice are
# the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
all_const = is_const.all(axis=axis)
if all_const.any():
warnings.warn(F_onewayConstantInputWarning())
# all_same_const is True if all the values in the groups along the axis=0
# slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean(axis=axis, keepdims=True)
alldata -= offset
normalized_ss = _square_of_sums(alldata, axis=axis) / bign
sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset, axis=axis) / a.shape[axis]
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
with np.errstate(divide='ignore', invalid='ignore'):
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
# Fix any f values that should be inf or nan because the corresponding
# inputs were constant.
if np.isscalar(f):
if all_same_const:
f = np.nan
prob = np.nan
elif all_const:
f = np.inf
prob = 0.0
else:
f[all_const] = np.inf
prob[all_const] = 0.0
f[all_same_const] = np.nan
prob[all_same_const] = np.nan
return F_onewayResult(f, prob)
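# Illustrative sketch, not part of the public API: a hypothetical helper
# reproducing the F statistic from the classical between/within
# sum-of-squares decomposition (dfbn = k - 1, dfwn = N - k) for 3 groups
# of 5 observations each.
def _demo_f_oneway_sketch():
    g1 = [6.9, 5.4, 5.8, 4.6, 4.0]
    g2 = [8.3, 6.8, 7.8, 9.2, 6.5]
    g3 = [8.0, 10.5, 8.1, 6.9, 9.3]
    F, _ = f_oneway(g1, g2, g3)
    groups = [np.asarray(g) for g in (g1, g2, g3)]
    grand = np.mean(np.concatenate(groups))
    ssb = sum(len(g) * (g.mean() - grand) ** 2 for g in groups)
    ssw = sum(((g - g.mean()) ** 2).sum() for g in groups)
    assert np.isclose(F, (ssb / 2) / (ssw / 12))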
def alexandergovern(*args, nan_policy='propagate'):
"""Performs the Alexander Govern test.
The Alexander-Govern approximation tests the equality of k independent
means in the face of heterogeneity of variance. The test is applied to
samples from two or more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two samples.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed A statistic of the test.
pvalue : float
The associated p-value from the chi-squared distribution.
Warns
-----
AlexanderGovernConstantInputWarning
Raised if an input is a constant array. The statistic is not defined
in this case, so ``np.nan`` is returned.
See Also
--------
f_oneway : one-way ANOVA
Notes
-----
The use of this test relies on several assumptions.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. Unlike `f_oneway`, this test does not assume homoscedasticity; instead,
it relaxes the assumption of equal variances.
Input samples must be finite, one dimensional, and with size greater than
one.
References
----------
.. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
Approximation for ANOVA under Variance Heterogeneity." Journal
of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
Examples
--------
>>> from scipy.stats import alexandergovern
Here are some data on annual percentage rate of interest charged on
new car loans at nine of the largest banks in four American cities
taken from the National Institute of Standards and Technology's
ANOVA dataset.
We use `alexandergovern` to test the null hypothesis that all cities
have the same mean APR against the alternative that the cities do not
all have the same mean APR. We decide that a significance level of 5%
is required to reject the null hypothesis in favor of the alternative.
>>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
>>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
>>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
>>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
... 11.89]
>>> alexandergovern(atlanta, chicago, houston, memphis)
AlexanderGovernResult(statistic=4.65087071883494,
pvalue=0.19922132490385214)
The p-value is 0.1992, indicating a nearly 20% chance of observing
such an extreme value of the test statistic under the null hypothesis.
This exceeds 5%, so we do not reject the null hypothesis in favor of
the alternative.
"""
args = _alexandergovern_input_validation(args, nan_policy)
if np.any([(arg == arg[0]).all() for arg in args]):
warnings.warn(AlexanderGovernConstantInputWarning())
return AlexanderGovernResult(np.nan, np.nan)
# The following formula numbers reference the equation described on
# page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other
# tests that serve as the basis for equation (8) but are not needed
# to perform the test.
# precalculate mean and length of each sample
lengths = np.array([ma.count(arg) if nan_policy == 'omit' else len(arg)
for arg in args])
means = np.array([np.mean(arg) for arg in args])
# (1) determine standard error of the mean for each sample
standard_errors = [np.std(arg, ddof=1) / np.sqrt(length)
for arg, length in zip(args, lengths)]
# (2) define a weight for each sample
inv_sq_se = 1 / np.square(standard_errors)
weights = inv_sq_se / np.sum(inv_sq_se)
# (3) determine variance-weighted estimate of the common mean
var_w = np.sum(weights * means)
# (4) determine one-sample t statistic for each group
t_stats = (means - var_w)/standard_errors
# calculate parameters to be used in transformation
v = lengths - 1
a = v - .5
b = 48 * a**2
c = (a * np.log(1 + (t_stats ** 2)/v))**.5
# (8) perform a normalizing transformation on t statistic
z = (c + ((c**3 + 3*c)/b) -
((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
(b**2*10 + 8*b*c**4 + 1000*b)))
# (9) calculate statistic
A = np.sum(np.square(z))
# "[the p value is determined from] central chi-square random deviates
# with k - 1 degrees of freedom". Alexander, Govern (94)
p = distributions.chi2.sf(A, len(args) - 1)
return AlexanderGovernResult(A, p)
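# Illustrative sketch, not part of the public API: a hypothetical helper
# showing basic usage and that the p-value is the chi-squared survival
# function of the statistic with k - 1 degrees of freedom (k = 3 here).
def _demo_alexandergovern_sketch():
    a = [13.2, 12.9, 14.1, 13.7, 12.5]
    b = [15.0, 14.2, 14.9, 15.6, 14.4]
    c = [13.9, 14.8, 14.1, 13.5, 15.2]
    res = alexandergovern(a, b, c)
    assert np.isclose(res.pvalue, distributions.chi2.sf(res.statistic, 2))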
def _alexandergovern_input_validation(args, nan_policy):
if len(args) < 2:
raise TypeError(f"2 or more inputs required, got {len(args)}")
# input arrays are flattened
args = [np.asarray(arg, dtype=float) for arg in args]
for i, arg in enumerate(args):
if np.size(arg) <= 1:
raise ValueError("Input sample size must be greater than one.")
if arg.ndim != 1:
raise ValueError("Input samples must be one-dimensional")
if np.isinf(arg).any():
raise ValueError("Input samples must be finite.")
contains_nan, nan_policy = _contains_nan(arg, nan_policy=nan_policy)
if contains_nan and nan_policy == 'omit':
args[i] = ma.masked_invalid(arg)
return args
AlexanderGovernResult = make_dataclass("AlexanderGovernResult", ("statistic",
"pvalue"))
class AlexanderGovernConstantInputWarning(RuntimeWarning):
"""Warning generated by `alexandergovern` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the statistic is not defined.")
self.args = (msg,)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> stats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> stats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is counterintuitive since there is no dependence of x and y if x is
larger than zero, which happens in about half of the cases if we sample x and y.
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
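# Illustrative sketch, not part of the public API: a hypothetical helper
# checking `pearsonr` against `np.corrcoef` and against the beta
# distribution p-value formula described in the docstring above.
def _demo_pearsonr_sketch():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    y = np.array([1.2, 1.9, 3.2, 4.1, 4.8, 6.3])
    r, p = pearsonr(x, y)
    assert np.isclose(r, np.corrcoef(x, y)[0, 1])
    n = len(x)
    dist = distributions.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
    assert np.isclose(p, 2 * dist.cdf(-abs(r)))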
def fisher_exact(table, alternative='two-sided'):
"""Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
See the Notes for more details.
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table. This can be used as an alternative to
`fisher_exact` when the numbers in the table are large.
barnard_exact : Barnard's exact test, which is a more powerful alternative
than Fisher's exact test for 2x2 contingency tables.
boschloo_exact : Boschloo's exact test, which is a more powerful alternative
than Fisher's exact test for 2x2 contingency tables.
Notes
-----
*Null hypothesis and p-values*
The null hypothesis is that the input table is from the hypergeometric
distribution with parameters (as used in `hypergeom`)
``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
input table is ``[[a, b], [c, d]]``. This distribution has support
``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x``
can be interpreted as the upper-left element of a 2x2 table, so the
tables in the distribution have form::
[ x n - x ]
[N - x M - (n + N) + x]
For example, if::
table = [6 2]
[1 4]
then the support is ``2 <= x <= 7``, and the tables in the distribution
are::
[2 6] [3 5] [4 4] [5 3] [6 2] [7 1]
[5 0] [4 1] [3 2] [2 3] [1 4] [0 5]
The probability of each table is given by the hypergeometric distribution
``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to
three significant digits)::
x 2 3 4 5 6 7
p 0.0163 0.163 0.408 0.326 0.0816 0.00466
These can be computed with::
>>> from scipy.stats import hypergeom
>>> table = np.array([[6, 2], [1, 4]])
>>> M = table.sum()
>>> n = table[0].sum()
>>> N = table[:, 0].sum()
>>> start, end = hypergeom.support(M, n, N)
>>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
0.004662 ])
The two-sided p-value is the probability that, under the null hypothesis,
a random table would have a probability equal to or less than the
probability of the input table. For our example, the probability of
the input table (where ``x = 6``) is 0.0816. The x values where the
probability does not exceed this are 2, 6 and 7, so the two-sided p-value
is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
>>> from scipy.stats import fisher_exact
>>> oddsr, p = fisher_exact(table, alternative='two-sided')
>>> p
0.10256410256410257
The one-sided p-value for ``alternative='greater'`` is the probability
that a random table has ``x >= a``, which in our example is ``x >= 6``,
or ``0.0816 + 0.00466 ~= 0.08626``::
>>> oddsr, p = fisher_exact(table, alternative='greater')
>>> p
0.08624708624708627
This is equivalent to computing the survival function of the
distribution at ``x = 5`` (one less than ``x`` from the input table,
because we want to include the probability of ``x = 6`` in the sum)::
>>> hypergeom.sf(5, M, n, N)
0.08624708624708627
For ``alternative='less'``, the one-sided p-value is the probability
that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),
or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
>>> oddsr, p = fisher_exact(table, alternative='less')
>>> p
0.9953379953379957
This is equivalent to computing the cumulative distribution function
of the distribution at ``x = 6``:
>>> hypergeom.cdf(6, M, n, N)
0.9953379953379957
*Odds ratio*
The calculated odds ratio is different from the one R uses. This SciPy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> from scipy.stats import fisher_exact
>>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
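# Illustrative sketch, not part of the public API: a hypothetical helper
# reproducing the two-sided p-value of `fisher_exact` by summing the
# hypergeometric probabilities that do not exceed that of the observed
# table, as described in the Notes above.
def _demo_fisher_exact_sketch():
    table = np.array([[6, 2], [1, 4]])
    _, p = fisher_exact(table)
    hypergeom = distributions.hypergeom
    M, n, N = table.sum(), table[0].sum(), table[:, 0].sum()
    start, end = hypergeom.support(M, n, N)
    pmf = hypergeom.pmf(np.arange(start, end + 1), M, n, N)
    pexact = hypergeom.pmf(table[0, 0], M, n, N)
    assert np.isclose(p, pmf[pmf <= pexact].sum())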
class SpearmanRConstantInputWarning(RuntimeWarning):
"""Warning generated by `spearmanr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
alternative='two-sided'):
"""Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in ``a``
and ``b`` combined.
pvalue : float
        The p-value for a hypothesis test whose null hypothesis
is that two sets of data are uncorrelated. See `alternative` above
for alternative hypotheses. `pvalue` has the same
shape as `correlation`.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
SpearmanrResult(correlation=0.82078..., pvalue=0.08858...)
>>> rng = np.random.default_rng()
>>> x2n = rng.standard_normal((100, 2))
>>> y2n = rng.standard_normal((100, 2))
>>> stats.spearmanr(x2n)
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> pval
array([[0. , 0.43111687, 0.41084066, 0.33891628],
[0.43111687, 0. , 0.15151618, 0.09600687],
[0.41084066, 0.15151618, 0. , 0.74938561],
[0.33891628, 0.09600687, 0.74938561, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> rng = np.random.default_rng()
>>> xint = rng.integers(10, size=(100, 2))
>>> stats.spearmanr(xint)
SpearmanrResult(correlation=0.09800224850707953, pvalue=0.3320271757932076)
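    The coefficient equals the Pearson correlation of the rank-transformed
    data, which gives a quick, informal cross-check:
    >>> from scipy.stats import rankdata, pearsonr
    >>> stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7]).correlation
    0.82078...
    >>> pearsonr(rankdata([1, 2, 3, 4, 5]), rankdata([5, 6, 7, 8, 7]))[0]
    0.82078...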
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
"supplied axis argument {}, please use only "
"values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 "
"variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
alternative=alternative)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).any(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
t, prob = _ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}
                 \sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
       no. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
:doi:`10.1002/9781118445112.stat06227`
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
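    As an informal illustration of the t-statistic relation given above
    (assignments only, so no output is shown):
    >>> rpb, p = stats.pointbiserialr(a, b)
    >>> t = np.sqrt(len(a) - 2) * rpb / np.sqrt(1 - rpb**2)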
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
method='auto', variant='b'):
"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
    variant : {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
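    The tau-c variant can be requested through the `variant` keyword
    (illustrative call; output omitted):
    >>> tau_c, p_value = stats.kendalltau(x1, x2, variant='c')  # doctest: +SKIP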
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
return KendalltauResult(np.nan, np.nan)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True)
else:
raise ValueError("Only variant 'b' is supported for masked arrays")
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
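        # For each rank value that occurs t > 1 times, accumulate
        #   sum(t * (t - 1) / 2)        -- pairwise tie count (tau-b denominator),
        #   sum(t * (t - 1) * (t - 2))  -- used in the x0 * y0 variance term,
        #   sum(t * (t - 1) * (2t + 5)) -- used in the asymptotic variance.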
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, min(dis, tot-dis))
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
pvalue = (special.erfc(np.abs(con_minus_dis) /
np.sqrt(var) / np.sqrt(2)))
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element (higher importance ranks being
associated with smaller values, e.g., 0 is the highest possible rank),
and a weigher function, which assigns a weight based on the rank to
each element. The weight of an exchange is then the sum or the product
of the weights of the ranks of the exchanged elements. The default
parameters compute :math:`\tau_\mathrm h`: an exchange between
elements with rank :math:`r` and :math:`s` (starting from zero) has
weight :math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as is usually the case, you do
    not have a specific ranking in mind, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters. Note that the convention used
here for ranking (lower values imply higher importance) is opposite
to that used by other SciPy statistical functions.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
        The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
        Presently ``np.nan``, as the null distribution of the statistic is
        unknown (even in the additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be "
"of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
# Return NaN if arrays are empty
return WeightedTauResult(np.nan, np.nan)
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
    # Reduce unsupported types to dense ranks (int64)
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
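    # rank=True (the default): average the statistic obtained with the
    # decreasing lexicographical rank by (x, y) and by (y, x), as described
    # in the docstring above.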
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError(
"All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size)
)
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive),
np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP:
"""Helper function to calculate parallel p-value."""
def __init__(self, x, y, random_states):
self.x = x
self.y = y
self.random_states = random_states
def __call__(self, index):
order = self.random_states[index].permutation(self.y.shape[0])
permy = self.y[order][:, order]
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(self.x, permy)[0]
return perm_stat
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
r"""Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
parallelp = _ParallelP(x=x, y=y, random_states=random_states)
with MapWrapper(workers) as mapwrapper:
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# correct for a p-value of 0. This is because, with bootstrapping
# permutations, a p-value of 0 is incorrect
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not know which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
    have been derived from and benchmarked within [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
        matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
        shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
Note that this will not run if inputs are distance matrices.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
        Contains additional useful returns containing the following
keys:
- mgc_map : ndarray
            A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
         and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
the :math:`i`-th row of :math:`B`
       * Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
       (which essentially sets all isolated large correlations to 0 and keeps
       connected large correlations the same as before, see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n`
    and :math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. :arXiv:`1907.02088`
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing.
:arXiv:`1806.05514`
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'0.033, 0.02'
or, if shape of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'-0.008, 1.0'
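    If distance (or similarity) matrices are already available, they can be
    passed directly with `compute_distance` set to ``None``. A minimal
    sketch (output omitted):
    >>> from scipy.spatial.distance import cdist
    >>> x = np.arange(100, dtype=np.float64).reshape(-1, 1)
    >>> dx = cdist(x, x)
    >>> mgc = multiscale_graphcorr(dx, dx, compute_distance=None)  # doctest: +SKIP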
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
    # check if compute_distance is a callable
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
    # check that the number of reps is a non-negative integer (a warning is
    # raised if it is under 1000)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
if compute_distance is None:
raise ValueError("Cannot run if inputs are distance matrices")
x, y = _two_sample_transform(x, y)
if compute_distance is not None:
# compute distance matrices for x and y
x = compute_distance(x)
y = compute_distance(y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(distx, disty):
r"""Helper function that calculates the MGC stat. See above for use.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)` or `(n, n)` and `(n, n)`
if distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
        Contains additional useful returns containing the following
keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
        # the global scale is the statistic calculated at the maximal nearest
        # neighbors. There is not enough local scale to search over, so
        # default to the global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
    # the global scale is the statistic calculated at the maximal nearest
    # neighbors. The threshold is the maximum over the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
    # the global scale is the statistic calculated at the maximal nearest
    # neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
        Concatenation of `u` and `v` along ``axis=0``. `x` thus has shape
        `(n + m, p)`.
    y : ndarray
        Label array for `x` where 0 refers to samples that come from `u` and
        1 refers to samples that come from `v`. `y` thus has shape
        `(n + m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
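    A one-sided test can be requested via `alternative` (illustrative call;
    output omitted):
    >>> stats.ttest_1samp(rvs[:, 0], 5.0, alternative='greater')  # doctest: +SKIP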
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis, alternative)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
# We use ``stdtr`` directly here as it handles the case when ``nan``
# values are present in the data and masked arrays are passed
# while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
# can be shared between the ``stats`` and ``mstats`` versions.
if alternative == 'less':
pval = special.stdtr(df, t)
elif alternative == 'greater':
pval = special.stdtr(df, -t)
elif alternative == 'two-sided':
pval = special.stdtr(df, -np.abs(t))*2
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
if pval.ndim == 0:
pval = pval[()]
return t, pval
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
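    # Welch-Satterthwaite approximation for the effective degrees of freedom
    # of the unequal-variance (Welch) t-test.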
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
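    # Classic Student t-test: pool the two sample variances, with
    # n1 + n2 - 2 degrees of freedom.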
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
Sample Sample
Size Mean Variance
Sample 1 13 15.0 87.5
Sample 2 11 12.0 39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
Number of Sample Sample
Size ones Mean Variance
Sample 1 150 30 0.2 0.16
Sample 2 200 45 0.225 0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
    arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_nans(a, b, axis, namedtuple_type):
"""
Generate an array of `nan`, with shape determined by `a`, `b` and `axis`.
This function is used by ttest_ind and ttest_rel to create the return
value when one of the inputs has size 0.
The shapes of the arrays are determined by dropping `axis` from the
shapes of `a` and `b` and broadcasting what is left.
The return value is a named tuple of the type given in `namedtuple_type`.
Examples
--------
>>> a = np.zeros((9, 2))
>>> b = np.zeros((5, 1))
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=array([nan, nan]), pvalue=array([nan, nan]))
>>> a = np.zeros((3, 0, 9))
>>> b = np.zeros((1, 10))
>>> stat, p = _ttest_nans(a, b, -1, Ttest_indResult)
>>> stat
array([], shape=(3, 0), dtype=float64)
>>> p
array([], shape=(3, 0), dtype=float64)
>>> a = np.zeros(10)
>>> b = np.zeros(7)
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=nan, pvalue=nan)
"""
shp = _broadcast_shapes_with_dropped_axis(a, b, axis)
if len(shp) == 0:
t = np.nan
p = np.nan
else:
t = np.full(shp, fill_value=np.nan)
p = t.copy()
return namedtuple_type(t, p)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
permutations=None, random_state=None, alternative="two-sided",
trim=0):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
The 'omit' option is not currently available for permutation tests or
        one-sided asymptotic tests.
permutations : non-negative int, np.inf, or None (default), optional
If 0 or None (default), use the t-distribution to calculate p-values.
Otherwise, `permutations` is the number of random permutations that
will be used to estimate p-values using a permutation test. If
`permutations` equals or exceeds the number of distinct partitions of
the pooled data, an exact test is performed instead (i.e. each
distinct partition is used exactly once). See Notes for details.
.. versionadded:: 1.7.0
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Pseudorandom number generator state used to generate permutations
(used only when `permutations` is not None).
.. versionadded:: 1.7.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
trim : float, optional
If nonzero, performs a trimmed (Yuen's) t-test.
Defines the fraction of elements to be trimmed from each end of the
input samples. If 0 (default), no elements will be trimmed from either
side. The number of trimmed elements from each tail is the floor of the
trim times the number of elements. Valid range is [0, .5).
.. versionadded:: 1.7
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Suppose we observe two independent samples, e.g. flower petal lengths, and
we are considering whether the two samples were drawn from the same
population (e.g. the same species of flower or two species with similar
petal characteristics) or two different populations.
The t-test quantifies the difference between the arithmetic means
    of the two samples. The p-value quantifies the probability of observing
    values as or more extreme, assuming the null hypothesis (that the
    samples are drawn from populations with the same population means) is true.
A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
our observation is not so unlikely to have occurred by chance. Therefore,
we do not reject the null hypothesis of equal population means.
If the p-value is smaller than our threshold, then we have evidence
against the null hypothesis of equal population means.
By default, the p-value is determined by comparing the t-statistic of the
observed data against a theoretical t-distribution.
When ``1 < permutations < binom(n, k)``, where
* ``k`` is the number of observations in `a`,
* ``n`` is the total number of observations in `a` and `b`, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either group `a`
or `b`, and the t-statistic is calculated. This process is performed
    repeatedly (`permutations` times), generating a distribution of the
t-statistic under the null hypothesis, and the t-statistic of the observed
data is compared to this distribution to determine the p-value. When
``permutations >= binom(n, k)``, an exact test is performed: the data are
partitioned between the groups in each distinct way exactly once.
The permutation test can be computationally expensive and not necessarily
more accurate than the analytical test, but it does not make strong
assumptions about the shape of the underlying distribution.
Use of trimming is commonly referred to as the trimmed t-test. At times
called Yuen's t-test, this is an extension of Welch's t-test, with the
difference being the use of winsorized means in calculation of the variance
and the trimmed sample size in calculation of the statistic. Trimming is
    recommended if the underlying distribution is long-tailed or contaminated
with outliers [4]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
.. [3] http://en.wikipedia.org/wiki/Resampling_%28statistics%29
.. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
.. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
Accessed 30 Mar. 2021.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs2)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
>>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs3)
Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
>>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs4)
Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
>>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs5)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
>>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
When performing a permutation test, more permutations typically yields
more accurate results. Use a ``np.random.Generator`` to ensure
reproducibility:
>>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
... random_state=rng)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052)
Take these two samples, one of which has an extreme tail.
>>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
>>> b = (1.1, 2.9, 4.2)
Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
using 20% trimming, ``trim=.2``, the test will reduce the impact of one
(``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
>>> stats.ttest_ind(a, b, trim=.2)
Ttest_indResult(statistic=3.4463884028073513,
pvalue=0.01369338726499547)
"""
if not (0 <= trim < .5):
raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
if permutations or trim != 0:
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by permutation tests or "
"trimmed tests.")
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var, alternative)
if a.size == 0 or b.size == 0:
return _ttest_nans(a, b, axis, Ttest_indResult)
if permutations is not None and permutations != 0:
if trim != 0:
raise ValueError("Permutations are currently not supported "
"with trimming.")
if permutations < 0 or (np.isfinite(permutations) and
int(permutations) != permutations):
raise ValueError("Permutations must be a non-negative integer.")
res = _permutation_ttest(a, b, permutations=permutations,
axis=axis, equal_var=equal_var,
nan_policy=nan_policy,
random_state=random_state,
alternative=alternative)
else:
n1 = a.shape[axis]
n2 = b.shape[axis]
if trim == 0:
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
m1 = np.mean(a, axis)
m2 = np.mean(b, axis)
else:
v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
return Ttest_indResult(*res)
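# A minimal illustrative sketch of the permutation procedure described in the
# `ttest_ind` docstring above, using hypothetical toy samples: pool the data,
# enumerate every way of assigning observations to group "a", and compare the
# resulting t-statistics with the observed one. The function name and data are
# assumptions for illustration only; nothing in the library calls this.
def _example_permutation_sketch():  # pragma: no cover - illustrative only
    from itertools import combinations
    import numpy as np
    a = np.array([1.0, 2.0, 4.0])
    b = np.array([3.0, 5.0, 6.0, 7.0])
    pooled = np.concatenate([a, b])
    t_obs = _calc_t_stat(a, b, equal_var=True)
    t_null = []
    for idx in combinations(range(pooled.size), a.size):
        mask = np.zeros(pooled.size, dtype=bool)
        mask[list(idx)] = True
        t_null.append(_calc_t_stat(pooled[mask], pooled[~mask], equal_var=True))
    # two-sided exact permutation p-value: fraction of splits whose statistic
    # is at least as extreme as the observed one
    p_exact = np.mean(np.abs(t_null) >= np.abs(t_obs))
    # this should agree with the exact test performed by `ttest_ind` when
    # `permutations` is at least binom(7, 3) == 35
    res = ttest_ind(a, b, permutations=np.inf)
    assert np.isclose(p_exact, res.pvalue)
    return t_obs, p_exact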
def _ttest_trim_var_mean_len(a, trim, axis):
"""Variance, mean, and length of winsorized input along specified axis"""
# for use with `ttest_ind` when trimming.
# further calculations in this test assume that the inputs are sorted.
# From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
a = np.sort(a, axis=axis)
# `g` is the number of elements to be replaced on each tail, converted
# from a percentage amount of trimming
n = a.shape[axis]
g = int(n * trim)
# Calculate the Winsorized variance of the input samples according to
# specified `g`
v = _calculate_winsorized_variance(a, g, axis)
# the total number of elements in the trimmed samples
n -= 2 * g
# calculate the g-times trimmed mean, as defined in [4] (1-1)
m = trim_mean(a, trim, axis=axis)
return v, m, n
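# A small illustrative check of the helper above, with hypothetical data: for
# 20% trimming of a sorted 10-element sample, the reported length should be
# n - 2*g and the reported mean the ordinary 20% trimmed mean. The function
# name and values are assumptions for illustration; nothing calls it.
def _example_trim_var_mean_len():  # pragma: no cover - illustrative only
    import numpy as np
    a = np.arange(10.0)
    v, m, n = _ttest_trim_var_mean_len(a, trim=0.2, axis=0)
    g = int(10 * 0.2)                       # 2 observations cut from each tail
    assert n == 10 - 2 * g                  # trimmed sample size
    assert np.isclose(m, np.mean(a[g:-g]))  # 20% trimmed mean of sorted data
    return v, m, n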
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return np.var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
# right sides with the repeating value. This can be seen in effect in (
# 1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
# `n - ddof` for use with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(np.var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
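# An illustrative check of the winsorized variance, with hypothetical data:
# for g = 1, the smallest and largest observations are replaced by their
# nearest neighbours and the variance is taken with ddof = 2*g + 1. The helper
# name and numbers below are assumptions for illustration only.
def _example_winsorized_variance():  # pragma: no cover - illustrative only
    import numpy as np
    a = np.array([0.0, 2.0, 3.0, 4.0, 100.0])    # sorted, with an outlier
    v = _calculate_winsorized_variance(a, g=1, axis=0)
    a_win = np.array([2.0, 2.0, 3.0, 4.0, 4.0])  # tails replaced
    assert np.isclose(v, np.var(a_win, ddof=3))  # ddof = 2*g + 1
    return v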
def _broadcast_concatenate(xs, axis):
"""Concatenate arrays along an axis with broadcasting."""
# move the axis we're concatenating along to the end
xs = [np.swapaxes(x, axis, -1) for x in xs]
# determine final shape of all but the last axis
shape = np.broadcast(*[x[..., 0] for x in xs]).shape
# broadcast along all but the last axis
xs = [np.broadcast_to(x, shape + (x.shape[-1],)) for x in xs]
# concatenate along last axis
res = np.concatenate(xs, axis=-1)
# move the last axis back to where it was
res = np.swapaxes(res, axis, -1)
return res
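# An illustrative shape check for the helper above, with hypothetical arrays:
# two inputs that are broadcastable except along the concatenation axis are
# lined up and joined along that axis. Not used by the library code.
def _example_broadcast_concatenate():  # pragma: no cover - illustrative only
    import numpy as np
    a = np.zeros((3, 1, 4))
    b = np.ones((1, 5, 6))
    out = _broadcast_concatenate((a, b), axis=-1)
    assert out.shape == (3, 5, 10)  # broadcast to (3, 5, .), then concatenated
    return out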
def _data_partitions(data, permutations, size_a, axis=-1, random_state=None):
"""All partitions of data into sets of given lengths, ignoring order"""
random_state = check_random_state(random_state)
if axis < 0: # we'll be adding a new dimension at the end
axis = data.ndim + axis
# prepare permutation indices
size = data.shape[axis]
# number of distinct combinations
n_max = special.comb(size, size_a)
if permutations < n_max:
indices = np.array([random_state.permutation(size)
for i in range(permutations)]).T
else:
permutations = n_max
indices = np.array([np.concatenate(z)
for z in _all_partitions(size_a, size-size_a)]).T
data = data.swapaxes(axis, -1) # so we can index along a new dimension
data = data[..., indices] # generate permutations
data = data.swapaxes(-2, axis) # restore original axis order
data = np.moveaxis(data, -1, 0) # permutations indexed along axis 0
return data, permutations
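# An illustrative sketch of the exhaustive branch above, with hypothetical
# inputs: when `permutations` is at least the number of distinct splits, every
# split is enumerated exactly once. This assumes the private `_all_partitions`
# helper, defined elsewhere in this module, yields each split once.
def _example_data_partitions():  # pragma: no cover - illustrative only
    import numpy as np
    data = np.arange(4.0)
    # binom(4, 2) == 6 distinct ways to choose group "a", so asking for 100
    # permutations triggers exact enumeration
    perm_data, n_perm = _data_partitions(data, permutations=100, size_a=2)
    assert n_perm == 6
    assert perm_data.shape == (6, 4)  # 6 splits of the 4 pooled values
    return perm_data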
def _calc_t_stat(a, b, equal_var, axis=-1):
"""Calculate the t statistic along the given dimension."""
na = a.shape[axis]
nb = b.shape[axis]
avg_a = np.mean(a, axis=axis)
avg_b = np.mean(b, axis=axis)
var_a = np.var(a, axis=axis, ddof=1)
var_b = np.var(b, axis=axis, ddof=1)
if not equal_var:
denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
else:
denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
return (avg_a-avg_b)/denom
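# An illustrative consistency check, with hypothetical data: the helper above
# should reproduce the pooled two-sample t statistic that `ttest_ind` reports
# with its default settings.
def _example_calc_t_stat():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.default_rng(12345)
    a = rng.normal(size=30)
    b = rng.normal(loc=0.5, size=40)
    t = _calc_t_stat(a, b, equal_var=True)
    assert np.isclose(t, ttest_ind(a, b).statistic)
    return t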
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
nan_policy='propagate', random_state=None,
alternative="two-sided"):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores
using permutation methods.
This test is similar to `stats.ttest_ind`, except it doesn't rely on an
approximate normality assumption since it uses a permutation test.
This function is only called from ttest_ind when permutations is not None.
Parameters
----------
a, b : array_like
The arrays must be broadcastable, except along the dimension
corresponding to `axis` (the zeroth, by default).
axis : int, optional
The axis over which to operate on a and b.
permutations: int, optional
Number of permutations used to calculate p-value. If greater than or
equal to the number of distinct permutations, perform an exact test.
equal_var: bool, optional
If False, Welch's t-test, which does not assume equal population
variances, is conducted. Otherwise, a standard equal-variance t-test
is conducted.
random_state : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Pseudorandom number generator state used for generating random
permutations.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
"""
random_state = check_random_state(random_state)
t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
na = a.shape[axis]
mat = _broadcast_concatenate((a, b), axis=axis)
mat = np.moveaxis(mat, axis, -1)
mat_perm, permutations = _data_partitions(mat, permutations, size_a=na,
random_state=random_state)
a = mat_perm[..., :na]
b = mat_perm[..., na:]
t_stat = _calc_t_stat(a, b, equal_var)
compare = {"less": np.less_equal,
"greater": np.greater_equal,
"two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
# Calculate the p-values
cmps = compare[alternative](t_stat, t_stat_observed)
pvalues = cmps.sum(axis=0) / permutations
# nans propagate naturally in statistic calculation, but need to be
# propagated manually into pvalues
if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
if np.ndim(pvalues) == 0:
pvalues = np.float64(np.nan)
else:
pvalues[np.isnan(t_stat_observed)] = np.nan
return (t_stat_observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
"""Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a test for the null hypothesis that two related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs2)
Ttest_relResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672)
>>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs3)
Ttest_relResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis, alternative)
na = _get_len(a, axis, "first argument")
nb = _get_len(b, axis, "second argument")
if na != nb:
raise ValueError('unequal length arrays')
if na == 0:
return _ttest_nans(a, b, axis, Ttest_relResult)
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_relResult(t, prob)
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
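# An illustrative check of the log-likelihood (G-test) branch above, with
# hypothetical frequencies: the statistic should equal
# 2 * sum(f_obs * log(f_obs / f_exp)) computed by hand, with `f_exp`
# defaulting to the mean of the observed frequencies.
def _example_power_divergence_gtest():  # pragma: no cover - illustrative only
    import numpy as np
    f_obs = np.array([16., 18., 16., 14., 12., 12.])
    f_exp = np.full_like(f_obs, f_obs.mean())
    stat, p = power_divergence(f_obs, lambda_="log-likelihood")
    assert np.isclose(stat, 2.0 * np.sum(f_obs * np.log(f_obs / f_exp)))
    return stat, p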
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
scipy.stats.barnard_exact : An unconditional exact test. An alternative
to chi-squared test for small sample sizes.
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5. According to [3]_, the
total number of samples is recommended to be greater than 13,
otherwise exact tests (such as Barnard's Exact test) should be used
because they do not overreject.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `chisquare` raises an error if the sums do not
agree within a relative tolerance of ``1e-8``.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
in the case of a correlated system of variables is such that it can be reasonably
supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
(1900), pp. 157-175.
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def _compute_dplus(cdfvals):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
n = len(cdfvals)
return (np.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
n = len(cdfvals)
return (cdfvals - np.arange(0.0, n)/n).max()
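# An illustrative hand computation for the two helpers above, with
# hypothetical CDF values: D+ is the largest amount by which the empirical
# steps i/n exceed the model CDF, and D- the largest amount by which the model
# CDF exceeds the steps (i-1)/n.
def _example_dplus_dminus():  # pragma: no cover - illustrative only
    import numpy as np
    cdfvals = np.array([0.1, 0.4, 0.8])                      # sorted, n = 3
    assert np.isclose(_compute_dplus(cdfvals), 2/3 - 0.4)    # attained at i = 2
    assert np.isclose(_compute_dminus(cdfvals), 0.8 - 2/3)   # attained at i = 3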
def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D- (depending on the value
of 'alternative')
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.ks_1samp(x, stats.norm.cdf)
(0.44435602715924361, 0.038850142705171065)
>>> stats.ks_1samp(stats.norm.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.100203351482..., pvalue=0.125544644447...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.ks_1samp(x, stats.norm.cdf)
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
Don't reject null hypothesis in favor of alternative hypothesis: two-sided
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.ks_1samp(stats.t.rvs(100,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.ks_1samp(stats.t.rvs(3,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
if np.ma.is_masked(x):
x = x.compressed()
N = len(x)
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative == 'greater':
Dplus = _compute_dplus(cdfvals)
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative == 'less':
Dminus = _compute_dminus(cdfvals)
return KstestResult(Dminus, distributions.ksone.sf(Dminus, N))
# alternative == 'two-sided':
Dplus = _compute_dplus(cdfvals)
Dminus = _compute_dminus(cdfvals)
D = np.max([Dplus, Dminus])
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = distributions.kstwo.sf(D, N)
elif mode == 'asymp':
prob = distributions.kstwobign.sf(D * np.sqrt(N))
else:
# mode == 'approx'
prob = 2 * distributions.ksone.sf(D, N)
prob = np.clip(prob, 0, 1)
return KstestResult(D, prob)
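# An illustrative relationship check, with hypothetical data: the two-sided KS
# statistic is the larger of the two one-sided statistics D+ and D-. The
# sketch imports `scipy.stats.norm` lazily because it is only meant to be run
# by hand, never at import time.
def _example_ks_1samp_alternatives():  # pragma: no cover - illustrative only
    import numpy as np
    from scipy.stats import norm
    x = np.linspace(-2.0, 2.0, 25)
    d_plus = ks_1samp(x, norm.cdf, alternative='greater').statistic
    d_minus = ks_1samp(x, norm.cdf, alternative='less').statistic
    d_two = ks_1samp(x, norm.cdf).statistic
    assert np.isclose(d_two, max(d_plus, d_minus))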
Ks_2sampResult = KstestResult
def _compute_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a
# sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
# the large dynamic range which appears during the calculation.
# Instead we rescale based on the magnitude of the right most term in
# the column and keep track of an exponent separately and apply
# it at the end of the calculation. Similarly when multiplying by
# the binomial coefficient.
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
expnt = 0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Rescale if the right most value is over 2**900
val = A[maxj - minj - 1]
_, valexpt = math.frexp(val)
if valexpt > 900:
# Scaling to bring down to about 2**800 appears
# sufficient for sizes under 10000.
valexpt -= 800
A = np.ldexp(A, -valexpt)
expnt += valexpt
val = A[maxj - minj - 1]
# Now divide by the binomial (m+n)!/m!/n!
for i in range(1, n + 1):
val = (val * i) / (m + i)
_, valexpt = math.frexp(val)
if valexpt < -128:
val = np.ldexp(val, -valexpt)
expnt += valexpt
# Finally scale if needed.
return np.ldexp(val, expnt)
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
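# An illustrative agreement check for the two path-counting routines above,
# with hypothetical small inputs: for equal sample sizes they describe
# complementary events, so P(outside) should equal 1 - P(strictly inside).
def _example_path_probability_agreement():  # pragma: no cover - illustrative only
    import numpy as np
    n, h = 2, 2
    outside = _compute_prob_outside_square(n, h)
    inside = _compute_prob_inside_method(n, n, n, h)
    assert np.isclose(outside, 1.0 - inside)  # both sides equal 1/3 here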
def _count_paths_outside_method(m, n, g, h):
"""Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without
# previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom()
# may return a float. Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
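# An illustrative check of the counting routine above, with hypothetical
# values: for equal sample sizes n1 == n2 == 3 and h == 1, the count of
# boundary-crossing paths should reproduce the closed form
# binom(2n, n - h) = binom(6, 2) = 15 used by `_attempt_exact_2kssamp` below
# for the equal-sample-size case.
def _example_count_paths_outside():  # pragma: no cover - illustrative only
    assert _count_paths_outside_method(3, 3, 3, 1) == 15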
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths)\
or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying continuous distributions F(x) and G(x)
of two independent samples. See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
If the KS statistic is small or the p-value is high, then we cannot
reject the null hypothesis in favor of the alternative.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs2)
KstestResult(statistic=0.24833333333333332, pvalue=5.846586728086578e-07)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value, about 0.44, is higher than 10%:
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs3)
KstestResult(statistic=0.07833333333333334, pvalue=0.4379658456442945)
For an identical distribution, we cannot reject the null hypothesis at the
5% level since the p-value, about 0.054, is above 5%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs4)
KstestResult(statistic=0.12166666666666667, pvalue=0.05401863039081145)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
# Ensure sign of minS is not negative.
minS = np.clip(-np.min(cddiffs), 0, 1)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int32).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
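# An illustrative brute-force check, with hypothetical data: the two-sided
# `ks_2samp` statistic should equal the largest absolute difference between
# the two empirical CDFs evaluated over the pooled observations.
def _example_ks_2samp_statistic():  # pragma: no cover - illustrative only
    import numpy as np
    d1 = np.array([1.0, 2.0, 3.0, 4.0])
    d2 = np.array([1.5, 2.5, 3.5])
    res = ks_2samp(d1, d2)
    pooled = np.concatenate([d1, d2])
    ecdf1 = np.searchsorted(d1, pooled, side='right') / d1.size
    ecdf2 = np.searchsorted(d2, pooled, side='right') / d2.size
    assert np.isclose(res.statistic, np.abs(ecdf1 - ecdf2).max())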
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='auto'):
"""
Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
goodness of fit.
The one-sample test compares the underlying distribution F(x) of a sample
against a given distribution G(x). The two-sample test compares the
underlying distributions of two independent samples. Both tests are valid
only for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used to generate random variables.
cdf : str, array_like or callable
If array_like, it should be a 1-D array of observations of random
variables, and the two-sample test is performed
(and rvs must be array_like).
If a callable, that callable is used to calculate the cdf.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used as the cdf function.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings or
callables.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice the
one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.444356027159..., pvalue=0.038850140086...)
>>> stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
The above lines are equivalent to:
>>> stats.kstest(stats.norm.rvs, 'norm', N=100)
KstestResult(statistic=0.113810164200..., pvalue=0.138690052319...) # may vary
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.kstest(x, 'norm', alternative='less')
KstestResult(statistic=0.1002033514..., pvalue=0.1255446444...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.kstest(x, 'norm', alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
if cdf:
return ks_1samp(xvals, cdf, args=args, alternative=alternative,
mode=mode)
return ks_2samp(xvals, yvals, alternative=alternative, mode=mode)
def tiecorrect(rankvals):
"""Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
We can test the hypothesis that two independent unequal-sized samples are
drawn from the same distribution by computing the Wilcoxon rank-sum
statistic.
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
The p-value of less than ``0.05`` indicates that this test rejects the
hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, nan_policy='propagate'):
"""Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments. Samples must be one-dimensional.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution. The p-value returned is the survival function of
the chi square distribution evaluated at H.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
elif arg.ndim != 1:
raise ValueError("Samples must be one-dimensional.")
n = np.asarray(list(map(len, args)))
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        # Mask NaNs so the masked-array implementation can ignore them.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('At least 3 sets of measurements must be given '
'for Friedman test, got {}.'.format(k))
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
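# Hedged usage sketch (added for illustration; ``_demo_friedmanchisquare`` is
# a hypothetical helper, not part of the original module, and is never called
# at import time). It shows three repeated measurements on the same seven
# subjects; the numbers are invented.
def _demo_friedmanchisquare():
    before = [72, 96, 88, 92, 74, 76, 82]
    after_week_1 = [120, 120, 132, 120, 101, 96, 112]
    after_week_2 = [76, 95, 104, 96, 84, 72, 76]
    # Returns a FriedmanchisquareResult(statistic, pvalue).
    return friedmanchisquare(before, after_week_1, after_week_2)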
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
    assumption of equal variances in the two groups. Note that this does not
    assume the distributions are the same. This test works on two independent samples,
which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
        The Brunner-Munzel W statistic.
pvalue : float
        p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
    Brunner and Munzel recommended estimating the p-value from the
    t-distribution when the sample size is 50 or less. If the size is lower
    than 10, it would be better to use the permuted Brunner-Munzel test
    (see [2]_).
References
----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer',
'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
    statistic : float
The statistic calculated by the specified method.
    pval : float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the
sum of the logarithms is multiplied by -2 in the implementation. This
quantity has a chi-square distribution that determines the p-value. The
`mudholkar_george` method is the difference of the Fisher's and Pearson's
    test statistics, each of which includes the -2 factor [4]_. However, the
    `mudholkar_george` method does not include these -2 factors. The test
    statistic of `mudholkar_george` is the sum of logistic random variables and
equation 3.6 in [3]_ is used to approximate the p-value based on Student's
t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
        raise ValueError(
            "Invalid method %r. Options are 'fisher', 'pearson', "
            "'mudholkar_george', 'tippett' or 'stouffer'." % method)
return (statistic, pval)
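# Hedged usage sketch (added for illustration; ``_demo_combine_pvalues`` is a
# hypothetical helper, not part of the original module, and is never called at
# import time). The p-values below are invented and assumed to come from
# independent tests of the same hypothesis.
def _demo_combine_pvalues():
    pvals = [0.01, 0.2, 0.3]
    fisher_stat, fisher_p = combine_pvalues(pvals, method='fisher')
    # Stouffer's method accepts per-study weights (e.g. proportional to
    # sample size); the weights here are invented.
    stouffer_stat, stouffer_p = combine_pvalues(pvals, method='stouffer',
                                                weights=[30, 10, 10])
    return (fisher_stat, fisher_p), (stouffer_stat, stouffer_p)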
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
:math:`v`, this distance also equals to:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
respective CDFs are :math:`U` and :math:`V`, equals to:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramér-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramér-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
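# Hedged usage sketch (added for illustration; ``_demo_cdf_distance`` is a
# hypothetical helper, not part of the original module, and is never called at
# import time). It illustrates the relations stated above on a toy sample:
# p=1 gives the Wasserstein distance, and sqrt(2) times the p=2 distance
# gives the energy distance.
def _demo_cdf_distance():
    u, v = [0.0, 1.0, 3.0], [5.0, 6.0, 8.0]
    w1 = _cdf_distance(1, u, v)              # same as wasserstein_distance(u, v)
    e = np.sqrt(2) * _cdf_distance(2, u, v)  # same as energy_distance(u, v)
    return w1, e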
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
"""Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average', *, axis=None):
"""Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
if axis is not None:
a = np.asarray(a)
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
# use scipy._lib._util._normalize_axis_index when available
np.core.multiarray.normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method)
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
annayqho/TheCannon | presentations/for_michigan/make_talk_plots.py | 1 | 4618 | import pickle
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# make sample spectra
# Plot a typical high-S/N LAMOST spectrum; `dataset` is assumed to be a
# TheCannon dataset object loaded beforehand.
plt.plot(dataset.wl, dataset.tr_flux[2,:], alpha=0.7, c='k')
plt.title(r"Typical High-S/N LAMOST Spectrum", fontsize=27)
plt.xlim(3500, 9500)
plt.tick_params(axis='x', labelsize=27)
plt.tick_params(axis='y', labelsize=27)
plt.xlabel("Wavelength ($\AA$)", fontsize=27)
plt.ylabel("Flux", fontsize=27)
plt.savefig("typical_spec_snr186.png")
ID = "spec-55938-B5593806_sp04-159.fits"
# now find it in APOGEE...
ID = "aspcapStar-r5-v603-2M12252154+2732475.fits"
import pyfits
fits_file = ID
file_in = pyfits.open(fits_file)
flux = np.array(file_in[1].data)
npixels = len(flux)
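# Reconstruct the wavelength grid from the FITS header: CRVAL1 and CDELT1
# presumably give the starting value and step in log10(wavelength), which is
# why each grid value is exponentiated (10 ** aval) below.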
start_wl = file_in[1].header['CRVAL1']
diff_wl = file_in[1].header['CDELT1']
val = diff_wl * (npixels) + start_wl
wl_full_log = np.arange(start_wl,val, diff_wl)
wl_full = [10 ** aval for aval in wl_full_log]
wl = np.array(wl_full)
bad = flux == 0
wl = np.ma.array(wl, mask=bad)
flux = np.ma.array(flux, mask=bad)
plt.plot(wl, flux, alpha=0.7, c='k')
plt.xlim(15100, 17000)
plt.ylim(0.6, 1.15)
plt.title(r"Typical High-S/N APOGEE Spectrum", fontsize=27)
plt.tight_layout()
plt.savefig("typical_spec_snr186_apogee.png")
label_file = 'reference_labels.csv'
(test_ID, test_SNR) = pickle.load(open("test_ID_SNR.p", "r"))
# for each test ID, find its index in label_file IDs
ids = np.loadtxt(label_file, usecols=(0,), dtype=str, delimiter=',')
inds = [np.where(ids==test_ID_val) for test_ID_val in test_ID]
names = ['T_{eff}', '\log g', '[Fe/H]', '[\\alpha/Fe]']
lims = [[3900,6000], [0,5], [-2, 1], [-0.1,0.4]]
#id,teff,logg,feh,alpha,snr
teff = np.loadtxt(label_file, usecols=(2,), dtype=float, delimiter=',')
logg = np.loadtxt(label_file, usecols=(3,), dtype=float, delimiter=',')
feh = np.loadtxt(label_file, usecols=(4,), dtype=float, delimiter=',')
alpha = np.loadtxt(label_file, usecols=(5,), dtype=float, delimiter=',')
apogee_label_vals = np.vstack(
(teff[inds].flatten(), logg[inds].flatten(), feh[inds].flatten(), alpha[inds].flatten())).T
test_labels = pickle.load(open("test_labels.p", "r"))
for i in range(0, len(names)):
name = names[i]
cannon = np.array(test_labels[:,i])
orig = np.array(apogee_label_vals[:,i], dtype=float)
snr = test_SNR
#bad = orig < -8000
#good = snr > 50
#orig = np.ma.array(orig, mask=bad)
#cannon = np.ma.array(cannon, mask=bad)
#snr = np.ma.array(snr, mask=bad)
#orig = orig[good]
#cannon = cannon[good]
#snr = snr[good]
scatter = np.round(np.std(orig-cannon),3)
scatter = int(scatter)
bias = np.round(np.mean(orig-cannon),4)
bias = np.round(bias, 3)
low = np.minimum(min(orig), min(cannon))
high = np.maximum(max(orig), max(cannon))
fig = plt.figure(figsize=(10,6))
gs = gridspec.GridSpec(1,2,width_ratios=[2,1], wspace=0.3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot([low, high], [low, high], 'k-', linewidth=2.0, label="x=y")
low = lims[i][0]
high = lims[i][1]
ax1.set_xlim(low, high)
ax1.set_ylim(low, high)
c = np.zeros(len(snr))
take = snr < 100
ax1.scatter(orig[take], cannon[take], marker='x', c='0.10', alpha=0.3, label="snr < 100")
take = snr > 100
ax1.scatter(orig[take], cannon[take], marker='x', c='k', label="snr > 100", alpha=0.7)
ax1.legend(fontsize=14, loc='lower right')
textstr = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,
fontsize=14, verticalalignment='top')
ax1.tick_params(axis='x', labelsize=14)
ax1.tick_params(axis='y', labelsize=14)
ax1.set_xlabel("APOGEE $%s$" %name, fontsize=14)
ax1.set_ylabel("Cannon-LAMOST $%s$" %name, fontsize=14)
ax1.set_title("Cannon-LAMOST Output vs. APOGEE $%s$ " %name, fontsize=14)
diff = cannon - orig
npoints = len(diff)
mu = np.mean(diff)
sig = np.std(diff)
    ax2.hist(diff, range=[-3*sig,3*sig], color='k', bins=int(np.sqrt(npoints)),
orientation='horizontal', alpha=0.3, histtype='stepfilled')
textstr = r"$\sigma=%s$" %(np.round(sig,2))
ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes,
fontsize=14, verticalalignment='top')
ax2.tick_params(axis='x', labelsize=14)
ax2.tick_params(axis='y', labelsize=14)
ax2.set_xlabel("Count", fontsize=14)
ax2.set_ylabel("Difference", fontsize=14)
ax2.axhline(y=0, c='k', lw=3, label='Difference=0')
ax2.set_title("Cannon-LAMOST Output Minus \n APOGEE Labels for $%s$" %name,
fontsize=14)
ax2.legend(fontsize=14, loc='lower center')
plt.savefig('1to1_%s.png'%i)
| mit |
fabianp/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
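# Hedged usage sketch (added for illustration; ``_demo_alpha_grid`` is a
# hypothetical helper, not part of scikit-learn, and is never called at import
# time). It builds a short grid of 5 alphas for a tiny random regression
# problem; the data are invented.
def _demo_alpha_grid():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    # The grid is logarithmically spaced from alpha_max down to eps * alpha_max.
    return _alpha_grid(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=5)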
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
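# Hedged usage sketch (added for illustration; ``_demo_enet_path`` is a
# hypothetical helper, not part of scikit-learn, and is never called at import
# time). It traces a 3-point elastic net path on the same toy data used in the
# lasso_path docstring above.
def _demo_enet_path():
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    # With return_n_iter left at False, three arrays come back:
    # the alphas, the coefficient path and the dual gaps.
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5,
                                         alphas=[5., 1., .5])
    return alphas, coefs, dual_gaps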
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
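# Illustrative sketch (not part of the library code; ``X`` and ``y`` are
# assumed to be a pre-existing feature matrix and target vector): with
# ``warm_start=True`` the coefficients from the previous ``fit`` call are
# reused as initialization, which can speed up fitting over a decreasing
# grid of alphas.
#
#     enet = ElasticNet(warm_start=True)
#     for alpha in [1.0, 0.1, 0.01]:
#         enet.set_params(alpha=alpha)
#         enet.fit(X, y)
#         print(alpha, enet.n_iter_)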
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
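# Illustrative sketch (not part of the library code; ``X`` and ``y`` are
# assumed to exist): since Lasso is ElasticNet with ``l1_ratio=1.0``, both
# estimators should recover the same coefficients for a given ``alpha``.
#
#     lasso = Lasso(alpha=0.1).fit(X, y)
#     enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)
#     np.allclose(lasso.coef_, enet.coef_)   # expected to be True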
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
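# Illustrative sketch (not part of the library code; ``X`` and ``y`` are
# assumed to exist): typical use of the CV estimators built on this base
# class, inspecting the alpha selected by cross-validation and the MSE path.
#
#     cv_model = LassoCV(cv=3).fit(X, y)
#     print(cv_model.alpha_)            # alpha chosen by cross-validation
#     print(cv_model.mse_path_.shape)   # (n_alphas, n_folds)
#     print(cv_model.coef_)             # coefficients of the refitted model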
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
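# Illustrative sketch (not part of the library code): converting an explicit
# ``a * L1 + b * L2`` penalty into the (alpha, l1_ratio) parametrization
# described in the docstring above.
#
#     def ab_to_alpha_l1_ratio(a, b):
#         alpha = a + b
#         l1_ratio = a / (a + b)
#         return alpha, l1_ratio
#
#     ab_to_alpha_l1_ratio(0.1, 0.3)   # -> (0.4, 0.25)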
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
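# Illustrative sketch (not part of the library code): the L1/L2 mixed norm
# ||W||_21 used above is the sum over rows of the Euclidean norm of each
# row, which can be computed with plain numpy.
#
#     W = np.array([[3.0, 4.0], [0.0, 0.0], [1.0, 0.0]])
#     l21 = np.sum(np.sqrt(np.sum(W ** 2, axis=1)))   # 5.0 + 0.0 + 1.0 = 6.0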
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
Cadair/solarbextrapolation | solarbextrapolation/analyticalmodels/base.py | 1 | 8605 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:30:22 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
#import pickle
import time
from datetime import datetime
#from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
from mayavi import mlab
# Internal imports
#from solarbextrapolation.utilities import si_this_map
from solarbextrapolation.map3dclasses import Map3D
class AnalyticalModel(object):
"""
Common class for the development of analytical models of magnetic fields.
Use the models to evaluate the accuracy of an extrapolation routine with
the figures of merit.
"""
def __init__(self, **kwargs):
# Default grid shape and physical ranges for the volume the model covers.
self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel)) # (x,y,z)
self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))
# Metadata
self.meta = {'ZNAXIS': 3, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value, 'ZNAXIS3': self.shape[2].value}
self.meta['analytical_model_notes'] = kwargs.get('notes', '')
self.meta['BUNIT'] = kwargs.get('bunit', u.T)
# CRVALn, CDELTn and NAXIS (already in meta) used for storing range in 2D fits files.
self.filepath = kwargs.get('filepath', None)
self.routine = kwargs.get('analytical_model_routine', type(self))
# Default 3D magnetic field
#X,Y,Z = np.zeros(self.shape.value), np.zeros(self.shape.value), np.zeros(self.shape.value)
npField = np.zeros([3]+list(np.array(self.shape.value, dtype=np.int)))
self.field = Map3D(npField, self.meta)
# Default magnetic field on boundary
magnetogram = np.zeros(np.array(self.shape[0:2].value, dtype=np.int))
magnetogram_header = {'ZNAXIS': 2, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value}
self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))
def _generate_field(self, **kwargs):
"""
The method for running a model to generate the field.
This is the primary method to be edited in subclasses for specific
model implementations.
"""
# Model code goes here.
arr_4d = np.zeros([int(self.map_boundary_data.data.shape[0]), int(self.map_boundary_data.data.shape[1]), 1, 3])
# Turn the 4D array into a Map3D object.
map_output = Map3D( arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange, xobsrange=self.xrange, yobsrange=self.yrange )
return map_output
def generate(self, **kwargs):
"""
Method to be called to calculate the vector field and return as a Map3D object.
Times and saves the extrapolation where applicable.
"""
# Record the time and duration of the extrapolation.
dt_start = datetime.now()
tim_start = time.time()
arr_output = self._generate_field(**kwargs)
tim_duration = time.time() - tim_start
# Add the duration and time to the meta/header data.
arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
arr_output.meta['extrapolator_duration'] = tim_duration
arr_output.meta['extrapolator_duration_unit'] = u.s
# Save the Map3D if a filepath has been set. (to avoid losing work)
if self.filepath:
arr_output.save(self.filepath)
# Add the output map to the object and return.
self.map = arr_output
return arr_output
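# Illustrative sketch (not part of the class; ``model`` is assumed to be an
# instance of a concrete subclass): after calling generate(), the timing
# information added above can be read back from the returned Map3D metadata.
#
#     aMap3D = model.generate()
#     print(aMap3D.meta['extrapolator_start_time'])
#     print(aMap3D.meta['extrapolator_duration'])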
def to_los_magnetogram(self, **kwargs):
"""
Calculate the LoS vector field as a SunPy map and return.
Generally this will require that you have run generate(self, ``**kwargs``)
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field.
.. I'm not sure if this is a good default.
"""
return self.magnetogram
def to_vec_magnetogram(self, **kwargs):
"""
Calculate the vector field as a SunPy map and return.
Generally this will require that you have run ``generate(self, **kwargs)``
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field. ######### I'm not sure if this is a good default.
"""
return self.magnetogram
if __name__ == '__main__':
# User-specified parameters
tup_shape = ( 20, 20, 20 )
x_range = ( -80.0, 80 ) * u.Mm
y_range = ( -80.0, 80 ) * u.Mm
z_range = ( 0.0, 120 ) * u.Mm
# Derived parameters (make SI where applicable)
x_0 = x_range[0].to(u.m).value
Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
x_size = Dx * tup_shape[0]
y_0 = y_range[0].to(u.m).value
Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
y_size = Dy * tup_shape[1]
z_0 = z_range[0].to(u.m).value
Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
z_size = Dz * tup_shape[2]
# Define the extrapolator as a child of the Extrapolators class
class AnaOnes(AnalyticalModel):
def __init__(self, **kwargs):
super(AnaOnes, self).__init__(**kwargs)
def _generate_field(self, **kwargs):
# Adding in custom parameters to the metadata
self.meta['analytical_model_routine'] = 'Ones Model'
# Generate a trivial field and return (X,Y,Z,Vec)
outshape = list(np.array(self.shape.value, dtype=np.int)) + [3]
arr_4d = np.ones(outshape)
return Map3D(arr_4d, self.meta)
# Set up an analytical model
xrange = u.Quantity([ 50, 300] * u.arcsec)
yrange = u.Quantity([-350, -100] * u.arcsec)
zrange = u.Quantity([ 0, 250] * u.arcsec)
aAnaMod = AnaOnes()
aMap3D = aAnaMod.generate()
# Visualise the 3D vector field
from solarbextrapolation.visualisation_functions import visualise
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
show_volume_axes=True,
debug=False)
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
show_volume_axes=False,
debug=False)
mlab.show()
"""
# For B_I field only, to save re-creating this interpolator for every cell.
A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000)
field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
for i in range(0, tup_shape[0]):
for j in range(0, tup_shape[1]):
for k in range(0, tup_shape[2]):
# Position of this point in space
x_pos = x_0 + ( i + 0.5 ) * Dx
y_pos = y_0 + ( j + 0.5 ) * Dy
z_pos = z_0 + ( k + 0.5 ) * Dz
#field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
#field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
#field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
np_boundary_data = field[:,:,0,2].T
dummyDataToMap(np_boundary_data, x_range, y_range)
#dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
"""
| mit |
zorojean/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
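# Optional check, not part of the original example: quantify how much the
# boosted ensemble improves over the single tree with the training-set mean
# squared error (a rough sanity check rather than a proper evaluation).
from sklearn.metrics import mean_squared_error
print("MSE single tree: %.4f" % mean_squared_error(y, y_1))
print("MSE AdaBoost (300 trees): %.4f" % mean_squared_error(y, y_2))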
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/io/json/test_ujson.py | 1 | 56098 | # -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encodeDecimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
def test_encodeStringConversion(self):
input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(input, **encode_kwargs)
assert input == json.loads(output)
assert output == expected_output
assert input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded, ensure_ascii=True)
helper(not_html_encoded, ensure_ascii=False)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
def test_doubleLongIssue(self):
sut = {u('a'): -4342969734183514}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
assert sut == decoded
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_doubleLongDecimalIssue(self):
sut = {u('a'): -12345678901234.56789012}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
assert sut == decoded
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
pytest.skip('Could not set locale for testing')
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads('4.78', precise_float=True) == 4.78
locale.setlocale(locale.LC_NUMERIC, savedlocale)
def test_encodeDecodeLongDecimal(self):
sut = {u('a'): -528656961.4399388}
encoded = ujson.dumps(sut, double_precision=15)
ujson.decode(encoded)
def test_decimalDecodeTestPrecise(self):
sut = {u('a'): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encodeDoubleTinyExponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
def test_encodeDictWithUnicodeKeys(self):
input = {u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1"),
u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1")}
output = ujson.encode(input)
input = {u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1")}
output = ujson.encode(input) # noqa
def test_encodeDoubleConversion(self):
input = math.pi
output = ujson.encode(input)
assert round(input, 5) == round(json.loads(output), 5)
assert round(input, 5) == round(ujson.decode(output), 5)
def test_encodeWithDecimal(self):
input = 1.0
output = ujson.encode(input)
assert output == "1.0"
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
assert round(input, 5) == round(json.loads(output), 5)
assert round(input, 5) == round(ujson.decode(output), 5)
def test_encodeArrayOfNestedArrays(self):
input = [[[[]]]] * 20
output = ujson.encode(input)
assert input == json.loads(output)
# assert output == json.dumps(input)
assert input == ujson.decode(output)
input = np.array(input)
tm.assert_numpy_array_equal(input, ujson.decode(
output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = ujson.encode(input)
assert input == json.loads(output)
# assert output == json.dumps(input)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
output = ujson.encode(input, double_precision=15)
assert input == json.loads(output)
assert input == ujson.decode(output)
output = ujson.encode(input, double_precision=9)
assert round(input, 9) == json.loads(output)
assert round(input, 9) == ujson.decode(output)
output = ujson.encode(input, double_precision=3)
assert round(input, 3) == json.loads(output)
assert round(input, 3) == ujson.decode(output)
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
pytest.raises(ValueError, ujson.encode, input, double_precision=20)
pytest.raises(ValueError, ujson.encode, input, double_precision=-1)
        # will throw TypeError
pytest.raises(TypeError, ujson.encode, input, double_precision='9')
        # will throw TypeError
pytest.raises(TypeError, ujson.encode,
input, double_precision=None)
def test_encodeStringConversion2(self):
input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
assert input == ujson.decode(output)
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeControlEscaping(self):
input = "\x19"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert input == dec
assert enc == json_unicode(input)
def test_encodeUnicodeConversion2(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicodeSurrogatePair(self):
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicode4BytesUTF8(self):
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicode4BytesUTF8Highest(self):
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeListConversion(self):
input = [1, 2, 3, 4]
output = ujson.encode(input)
assert input == json.loads(output)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
def test_encodeDictConversion(self):
input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(input) # noqa
assert input == json.loads(output)
assert input == ujson.decode(output)
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
]
for test in tests:
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encodeTimeConversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encodeTimeConversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < LooseVersion('1.7.0'):
pytest.skip("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas._libs.lib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(input, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
assert dec1 == dec2
def test_encodeRecursionMax(self):
# 8 is the max recursion depth
class O2:
member = 0
class O1:
member = 0
input = O1()
input.member = O2()
input.member.member = input
try:
output = ujson.encode(input) # noqa
assert False, "Expected overflow exception"
except(OverflowError):
pass
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
assert 31337 == ujson.decode(input)
def test_decodeNumericIntNeg(self):
input = "-31337"
assert -31337 == ujson.decode(input)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encodeUnicode4BytesUTF8Fail(self):
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input) # noqa
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
input = "\x00"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
assert ujson.decode(input) == json.loads(input)
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807]
output = ujson.encode(input)
assert input == json.loads(output)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_numericIntExp(self):
input = "1337E40"
output = ujson.decode(input)
assert output == json.loads(input)
def test_numericIntFrcExp(self):
input = "1.337E40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
input = "1337E+9"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpePLUS(self):
input = "1.337e+40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpE(self):
input = "1337E40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpe(self):
input = "1337e40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpEMinus(self):
input = "1.337E-4"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpeMinus(self):
input = "1.337e-4"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_dumpToFile(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dumpToFileLikeObject(self):
class filelike:
def __init__(self):
self.bytes = ''
def write(self, bytes):
self.bytes += bytes
f = filelike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dumpFileArgsError(self):
try:
ujson.dump([], '')
except TypeError:
pass
else:
assert False, 'expected TypeError'
def test_loadFile(self):
f = StringIO("[1,2,3,4]")
assert [1, 2, 3, 4] == ujson.load(f)
f = StringIO("[1,2,3,4]")
tm.assert_numpy_array_equal(
np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileLikeObject(self):
class filelike:
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
f = filelike()
assert [1, 2, 3, 4] == ujson.load(f)
f = filelike()
tm.assert_numpy_array_equal(
np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
ujson.load("[]")
except TypeError:
pass
else:
assert False, "expected TypeError"
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
try:
ujson.encode(12839128391289382193812939)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_encodeNumericOverflowNested(self):
for n in range(0, 100):
class Nested:
x = 12839128391289382193812939
nested = Nested()
try:
ujson.encode(nested)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_decodeNumberWith32bitSignBit(self):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
boundary1 = 2**31 # noqa
boundary2 = 2**32 # noqa
docs = (
'{"id": 3590016419}',
'{{"id": {low}}}'.format(low=2**31),
'{{"id": {high}}}'.format(high=2**32),
'{{"id": {one_less}}}'.format(one_less=(2**32) - 1),
)
results = (3590016419, 2**31, 2**32, 2**32 - 1)
for doc, result in zip(docs, results):
assert ujson.decode(doc)['id'] == result
def test_encodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
input = base * 1024 * 1024 * 2
output = ujson.encode(input) # noqa
def test_decodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
quote = compat.str_to_bytes("\"")
input = quote + (base * 1024 * 1024 * 2) + quote
output = ujson.decode(input) # noqa
def test_toDict(self):
d = {u("key"): 31337}
class DictTest:
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_defaultHandler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(obj):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(obj):
raise TypeError("I raise for anything")
with tm.assert_raises_regex(TypeError, "I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(obj):
return 42
assert ujson.decode(ujson.encode(
_TestObject("foo"), default_handler=my_int_handler)) == 42
def my_obj_handler(obj):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
l = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(l, default=str)) ==
ujson.decode(ujson.encode(l, default_handler=str)))
class TestNumpyJSONTests(object):
def test_Bool(self):
b = np.bool(True)
assert ujson.decode(ujson.encode(b)) == b
def test_BoolArray(self):
inpt = np.array([True, False, True, True, False, True, False, False],
dtype=np.bool)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
tm.assert_numpy_array_equal(inpt, outp)
def test_Int(self):
num = np.int(2562010)
assert np.int(ujson.decode(ujson.encode(num))) == num
num = np.int8(127)
assert np.int8(ujson.decode(ujson.encode(num))) == num
num = np.int16(2562010)
assert np.int16(ujson.decode(ujson.encode(num))) == num
num = np.int32(2562010)
assert np.int32(ujson.decode(ujson.encode(num))) == num
num = np.int64(2562010)
assert np.int64(ujson.decode(ujson.encode(num))) == num
num = np.uint8(255)
assert np.uint8(ujson.decode(ujson.encode(num))) == num
num = np.uint16(2562010)
assert np.uint16(ujson.decode(ujson.encode(num))) == num
num = np.uint32(2562010)
assert np.uint32(ujson.decode(ujson.encode(num))) == num
num = np.uint64(2562010)
assert np.uint64(ujson.decode(ujson.encode(num))) == num
def test_IntArray(self):
arr = np.arange(100, dtype=np.int)
dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
tm.assert_numpy_array_equal(inpt, outp)
def test_IntMax(self):
num = np.int(np.iinfo(np.int).max)
assert np.int(ujson.decode(ujson.encode(num))) == num
num = np.int8(np.iinfo(np.int8).max)
assert np.int8(ujson.decode(ujson.encode(num))) == num
num = np.int16(np.iinfo(np.int16).max)
assert np.int16(ujson.decode(ujson.encode(num))) == num
num = np.int32(np.iinfo(np.int32).max)
assert np.int32(ujson.decode(ujson.encode(num))) == num
num = np.uint8(np.iinfo(np.uint8).max)
assert np.uint8(ujson.decode(ujson.encode(num))) == num
num = np.uint16(np.iinfo(np.uint16).max)
assert np.uint16(ujson.decode(ujson.encode(num))) == num
num = np.uint32(np.iinfo(np.uint32).max)
assert np.uint32(ujson.decode(ujson.encode(num))) == num
if not compat.is_platform_32bit():
num = np.int64(np.iinfo(np.int64).max)
assert np.int64(ujson.decode(ujson.encode(num))) == num
# uint64 max will always overflow as it's encoded to signed
num = np.uint64(np.iinfo(np.int64).max)
assert np.uint64(ujson.decode(ujson.encode(num))) == num
def test_Float(self):
num = np.float(256.2013)
assert np.float(ujson.decode(ujson.encode(num))) == num
num = np.float32(256.2013)
assert np.float32(ujson.decode(ujson.encode(num))) == num
num = np.float64(256.2013)
assert np.float64(ujson.decode(ujson.encode(num))) == num
def test_FloatArray(self):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
dtypes = (np.float, np.float32, np.float64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(
inpt, double_precision=15)), dtype=dtype)
tm.assert_almost_equal(inpt, outp)
def test_FloatMax(self):
num = np.float(np.finfo(np.float).max / 10)
tm.assert_almost_equal(np.float(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
num = np.float32(np.finfo(np.float32).max / 10)
tm.assert_almost_equal(np.float32(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
num = np.float64(np.finfo(np.float64).max / 10)
tm.assert_almost_equal(np.float64(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
def test_Arrays(self):
arr = np.arange(100)
arr = arr.reshape((10, 10))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((5, 5, 4))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((100, 1))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
l = ['a', list(), dict(), dict(), list(),
42, 97.8, ['a', 'b'], {'key': 'val'}]
arr = np.array(l)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
arr = arr.reshape((5, 5, 4))
outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
tm.assert_almost_equal(arr, outp)
outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
tm.assert_almost_equal(arr, outp)
def test_OdArray(self):
def will_raise():
ujson.encode(np.array(1))
pytest.raises(TypeError, will_raise)
def test_ArrayNumpyExcept(self):
input = ujson.dumps([42, {}, 'a'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps(['a', 'b', [], 'c'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, ['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{}, []])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, None])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 'b'}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps({'a': {'b': {'c': 42}}})
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
def test_ArrayNumpyLabelled(self):
input = {'a': []}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(['a']) == output[1]).all()
assert output[2] is None
input = [{'a': 42}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
assert (np.array([42]) == output[0]).all()
assert output[1] is None
assert (np.array([u('a')]) == output[2]).all()
# Write out the dump explicitly so there is no dependency on iteration
# order GH10837
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expectedvals == output[0]).all()
assert output[1] is None
assert (np.array([u('a'), 'b']) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expectedvals == output[0]).all()
assert (np.array(['1', '2', '3']) == output[1]).all()
assert (np.array(['a', 'b']) == output[2]).all()
class TestPandasJSONTests(object):
def test_DataFrame(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df)))
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
outp = DataFrame(**dec)
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
outp.index = df.index
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
outp.index = df.index
assert (df.values == outp.values).all()
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
assert (df.transpose() == outp).values.all()
tm.assert_index_equal(df.transpose().columns, outp.columns)
tm.assert_index_equal(df.transpose().index, outp.index)
def test_DataFrameNumpy(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"),
numpy=True))
assert (df.transpose() == outp).values.all()
tm.assert_index_equal(df.transpose().columns, outp.columns)
tm.assert_index_equal(df.transpose().index, outp.index)
def test_DataFrameNested(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
nested = {'df1': df, 'df2': df.copy()}
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
assert ujson.decode(ujson.encode(nested)) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
assert ujson.decode(ujson.encode(nested, orient="index")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
assert ujson.decode(ujson.encode(nested, orient="records")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
assert ujson.decode(ujson.encode(nested, orient="values")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
assert ujson.decode(ujson.encode(nested, orient="split")) == exp
def test_DataFrameNumpyLabelled(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(*ujson.decode(ujson.encode(df),
numpy=True, labelled=True))
assert (df.T == outp).values.all()
tm.assert_index_equal(df.T.columns, outp.columns)
tm.assert_index_equal(df.T.index, outp.index)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"),
numpy=True, labelled=True))
outp.index = df.index
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"),
numpy=True, labelled=True))
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
def test_Series(self):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
# column indexed
outp = Series(ujson.decode(ujson.encode(s))).sort_values()
exp = Series([10, 20, 30, 40, 50, 60],
index=['6', '7', '8', '9', '10', '15'])
tm.assert_series_equal(outp, exp)
outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
tm.assert_series_equal(outp, exp)
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
outp = Series(**dec)
tm.assert_series_equal(outp, s)
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
numpy=True))
outp = Series(**dec)
exp_np = Series(np.array([10, 20, 30, 40, 50, 60]))
exp_pd = Series([10, 20, 30, 40, 50, 60])
outp = Series(ujson.decode(ujson.encode(s, orient="records"),
numpy=True))
tm.assert_series_equal(outp, exp_np)
outp = Series(ujson.decode(ujson.encode(s, orient="records")))
exp = Series([10, 20, 30, 40, 50, 60])
tm.assert_series_equal(outp, exp_pd)
outp = Series(ujson.decode(ujson.encode(s, orient="values"),
numpy=True))
tm.assert_series_equal(outp, exp_np)
outp = Series(ujson.decode(ujson.encode(s, orient="values")))
tm.assert_series_equal(outp, exp_pd)
outp = Series(ujson.decode(ujson.encode(
s, orient="index"))).sort_values()
exp = Series([10, 20, 30, 40, 50, 60],
index=['6', '7', '8', '9', '10', '15'])
tm.assert_series_equal(outp, exp)
outp = Series(ujson.decode(ujson.encode(
s, orient="index"), numpy=True)).sort_values()
tm.assert_series_equal(outp, exp)
def test_SeriesNested(self):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {'s1': s, 's2': s.copy()}
exp = {'s1': ujson.decode(ujson.encode(s)),
's2': ujson.decode(ujson.encode(s))}
assert ujson.decode(ujson.encode(nested)) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
's2': ujson.decode(ujson.encode(s, orient="split"))}
assert ujson.decode(ujson.encode(nested, orient="split")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
's2': ujson.decode(ujson.encode(s, orient="records"))}
assert ujson.decode(ujson.encode(nested, orient="records")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
's2': ujson.decode(ujson.encode(s, orient="values"))}
assert ujson.decode(ujson.encode(nested, orient="values")) == exp
exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
's2': ujson.decode(ujson.encode(s, orient="index"))}
assert ujson.decode(ujson.encode(nested, orient="index")) == exp
def test_Index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# column indexed
outp = Index(ujson.decode(ujson.encode(i)), name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i), numpy=True), name='index')
tm.assert_index_equal(i, outp)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
outp = Index(**dec)
tm.assert_index_equal(i, outp)
assert i.name == outp.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
outp = Index(**dec)
tm.assert_index_equal(i, outp)
assert i.name == outp.name
outp = Index(ujson.decode(ujson.encode(i, orient="values")),
name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i, orient="records")),
name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i, orient="records"),
numpy=True), name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i, orient="index")),
name='index')
tm.assert_index_equal(i, outp)
outp = Index(ujson.decode(ujson.encode(i, orient="index"),
numpy=True), name='index')
tm.assert_index_equal(i, outp)
def test_datetimeindex(self):
from pandas.core.indexes.datetimes import date_range
rng = date_range('1/1/2000', periods=20)
encoded = ujson.encode(rng, date_unit='ns')
decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
tm.assert_index_equal(rng, decoded)
ts = Series(np.random.randn(len(rng)), index=rng)
decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
def test_decodeArrayTrailingCommaFail(self):
input = "[31337,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayLeadingCommaFail(self):
input = "[,31337]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayOnlyCommaFail(self):
input = "[,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayUnmatchedBracketFail(self):
input = "[]]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayEmpty(self):
input = "[]"
ujson.decode(input)
def test_decodeArrayOneItem(self):
input = "[31337]"
ujson.decode(input)
def test_decodeBigValue(self):
input = "9223372036854775807"
ujson.decode(input)
def test_decodeSmallValue(self):
input = "-9223372036854775808"
ujson.decode(input)
def test_decodeTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeWithTrailingWhitespaces(self):
input = "{}\n\t "
ujson.decode(input)
def test_decodeWithTrailingNonWhitespaces(self):
try:
input = "{}\n\t a"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayWithBigInt(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayFaultyUnicode(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeFloatingPointAdditionalTests(self):
places = 15
tm.assert_almost_equal(-1.1234567893,
ujson.loads("-1.1234567893"),
check_less_precise=places)
tm.assert_almost_equal(-1.234567893,
ujson.loads("-1.234567893"),
check_less_precise=places)
tm.assert_almost_equal(-1.34567893,
ujson.loads("-1.34567893"),
check_less_precise=places)
tm.assert_almost_equal(-1.4567893,
ujson.loads("-1.4567893"),
check_less_precise=places)
tm.assert_almost_equal(-1.567893,
ujson.loads("-1.567893"),
check_less_precise=places)
tm.assert_almost_equal(-1.67893,
ujson.loads("-1.67893"),
check_less_precise=places)
tm.assert_almost_equal(-1.7893, ujson.loads("-1.7893"),
check_less_precise=places)
tm.assert_almost_equal(-1.893, ujson.loads("-1.893"),
check_less_precise=places)
tm.assert_almost_equal(-1.3, ujson.loads("-1.3"),
check_less_precise=places)
tm.assert_almost_equal(1.1234567893, ujson.loads(
"1.1234567893"), check_less_precise=places)
tm.assert_almost_equal(1.234567893, ujson.loads(
"1.234567893"), check_less_precise=places)
tm.assert_almost_equal(
1.34567893, ujson.loads("1.34567893"), check_less_precise=places)
tm.assert_almost_equal(
1.4567893, ujson.loads("1.4567893"), check_less_precise=places)
tm.assert_almost_equal(
1.567893, ujson.loads("1.567893"), check_less_precise=places)
tm.assert_almost_equal(1.67893, ujson.loads("1.67893"),
check_less_precise=places)
tm.assert_almost_equal(1.7893, ujson.loads("1.7893"),
check_less_precise=places)
tm.assert_almost_equal(1.893, ujson.loads("1.893"),
check_less_precise=places)
tm.assert_almost_equal(1.3, ujson.loads("1.3"),
check_less_precise=places)
def test_encodeBigSet(self):
s = set()
for x in range(0, 100000):
s.add(x)
ujson.encode(s)
def test_encodeEmptySet(self):
s = set()
assert "[]" == ujson.encode(s)
def test_encodeSet(self):
s = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
enc = ujson.encode(s)
dec = ujson.decode(enc)
for v in dec:
assert v in s
def _clean_dict(d):
return {str(k): v for k, v in compat.iteritems(d)}
| bsd-3-clause |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/sphinxext/ipython_directive.py | 12 | 42845 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount:
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
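A fuller ``conf.py`` sketch combining several of these options (all values shown
are illustrative; the regular expressions and prompt strings simply restate the
defaults documented above)::
    import re
    extensions = ['IPython.sphinxext.ipython_console_highlighting',
                  'IPython.sphinxext.ipython_directive']
    ipython_savefig_dir = 'savefig'
    ipython_rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
    ipython_rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    ipython_holdcount = True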
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
Pseudo-Decorators
=================
Note: Only one decorator is supported per input. If more than one decorator
is specified, then only the last one is used.
In addition to the Pseudo-Decorators/options described at the above link,
several enhancements have been made. The directive will emit a message to the
console at build-time if code-execution resulted in an exception or warning.
You can suppress these on a per-block basis by specifying the :okexcept:
or :okwarning: options:
.. code-block:: rst
.. ipython::
:okexcept:
:okwarning:
In [1]: 1/0
In [2]: # raise warning.
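A short illustrative block combining the @suppress and @savefig pseudo-decorators
(the filename ``example_plot.png`` and the ``width=4in`` argument are placeholders,
not required values):
.. code-block:: rst
    .. ipython::
        @suppress
        In [1]: import matplotlib.pyplot as plt
        @savefig example_plot.png width=4in
        In [2]: plt.plot([1, 2, 3]);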
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import atexit
import os
import re
import sys
import tempfile
import ast
import warnings
import shutil
# Third-party
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
# Our own
from traitlets.config import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
            continuation = '   %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call whose
            # echoed output follows (e.g. 'print'). The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
                    # However, if the space is there we must consume it, or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
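# Illustrative usage sketch (not part of the directive machinery): shows how
# block_parser tokenizes a single part, using regexes and prompt formats
# equivalent to the documented defaults. Wrapped in a helper so importing this
# module stays side-effect free; the sample session text is made up.
def _example_block_parser_usage():
    rgxin = re.compile(r'In \[(\d+)\]:\s?(.*)\s*')
    rgxout = re.compile(r'Out\[(\d+)\]:\s?(.*)\s*')
    part = "# a comment\nIn [1]: x = 2 * 21\nOut[1]: 42"
    # Returns:
    #   [(COMMENT, '# a comment'),
    #    (INPUT, (None, 'x = 2 * 21', '')),
    #    (OUTPUT, '42')]
    return block_parser(part, rgxin, rgxout, 'In [%d]:', 'Out[%d]:')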
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None):
self.cout = StringIO()
if exec_lines is None:
exec_lines = []
# Create config object for IPython
config = Config()
config.HistoryManager.hist_file = ':memory:'
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
        # This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
atexit.register(self.cleanup)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.tmp_profile_dir = tmp_profile_dir
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
# this is assigned by the SetUp method of IPythonDirective
# to point at itself.
#
# So, you can access handy things at self.directive.state
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def cleanup(self):
shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
        continuation = '   %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# The "rest" is the standard output of the input. This needs to be
# added when in verbatim mode. If there is no "rest", then we don't
# add it, as the new line will be added by the processed output.
ret.append(rest)
# Fetch the processed output. (This is not the submitted output.)
self.cout.seek(0)
processed_output = self.cout.read()
if not is_suppress and not is_semicolon:
#
# In IPythonDirective.run, the elements of `ret` are eventually
# combined such that '' entries correspond to newlines. So if
# `processed_output` is equal to '', then the adding it to `ret`
# ensures that there is a blank line between consecutive inputs
# that have no outputs, as in:
#
# In [1]: x = 4
#
# In [2]: x = 5
#
# When there is processed output, it has a '\n' at the tail end. So
# adding the output to `ret` will provide the necessary spacing
# between consecutive input/output blocks, as in:
#
# In [1]: x
# Out[1]: 5
#
# In [2]: x
# Out[2]: 5
#
# When there is stdout from the input, it also has a '\n' at the
# tail end, and so this ensures proper spacing as well. E.g.:
#
# In [1]: print x
# 5
#
# In [2]: x = 5
#
# When in verbatim mode, `processed_output` is empty (because
            # nothing was passed to IP). Sometimes the submitted code block has
# an Out[] portion and sometimes it does not. When it does not, we
# need to ensure proper spacing, so we have to add '' to `ret`.
# However, if there is an Out[] in the submitted code, then we do
# not want to add a newline as `process_output` has stuff to add.
# The difficulty is that `process_input` doesn't know if
# `process_output` will be called---so it doesn't know if there is
            # Out[] in the code block. This requires that we include a hack in
# `process_block`. See the comments there.
#
ret.append(processed_output)
elif is_semicolon:
# Make sure there is a newline after the semicolon.
ret.append('')
# context information
filename = "Unknown"
lineno = 0
if self.directive.state:
filename = self.directive.state.document.current_source
lineno = self.directive.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in processed_output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(processed_output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(('-' * 76) + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, processed_output,
is_doctest, decorator, image_file, image_directive)
def process_output(self, data, output_prompt, input_lines, output,
is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
# Recall: `data` is the submitted output, and `output` is the processed
# output from `input_lines`.
TAB = ' ' * 4
if is_doctest and output is not None:
found = output # This is the processed output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
# When in verbatim mode, this holds additional submitted output
# to be written in the final Sphinx output.
# https://github.com/ipython/ipython/issues/5776
out_data = []
is_verbatim = decorator=='@verbatim' or self.is_verbatim
if is_verbatim and data.strip():
# Note that `ret` in `process_block` has '' as its last element if
# the code block was in verbatim mode. So if there is no submitted
# output, then we will have proper spacing only if we do not add
# an additional '' to `out_data`. This is why we condition on
# `and data.strip()`.
# The submitted output has no output prompt. If we want the
# prompt and the code to appear, we need to join them now
# instead of adding them separately---as this would create an
# undesired newline. How we do this ultimately depends on the
# format of the output regex. I'll do what works for the default
# prompt for now, and we might have to adjust if it doesn't work
# in other cases. Finally, the submitted output does not have
# a trailing newline, so we must add it manually.
out_data.append("{0} {1}\n".format(output_prompt, data))
return out_data
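    # Illustrative sketch (assuming the default 'Out[%d]:' prompt): with
    # data == '4' and prompt 'Out[2]:', the verbatim branch above yields
    # out_data == ['Out[2]: 4\n'].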
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
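    # Illustrative sketch (assuming the default 'In [%d]:' prompt): feeding
    # ['x = 1', 'x + 1'] through process_pure_python yields roughly
    # ['In [0]: x = 1', '', 'In [1]: x + 1', ''], i.e. prompts are prepended
    # and a blank line follows each complete statement.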
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
outdir = self.state.document.settings.env.app.outdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path or '_static'
if isinstance(savefig_dir, list):
savefig_dir = os.path.join(*savefig_dir)
savefig_dir = os.path.join(outdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
        if self.state.document.current_source not in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
# parts consists of all text within the ipython-block.
# Each part is an input/output block.
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' {0}'.format(line)
for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines) > 2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
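# Example (hypothetical) Sphinx conf.py snippet for this extension; the
# extension path is an assumption and depends on where this module is
# installed:
#
#     extensions = ['IPython.sphinxext.ipython_directive']
#     ipython_mplbackend = 'agg'
#     ipython_execlines = ['import numpy as np',
#                          'import matplotlib.pyplot as plt']
#     ipython_holdcount = True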
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| gpl-2.0 |
cs-chan/Deep-Plant | GRU-CFA/Codes/visualClef.py | 1 | 18504 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 15:24:59 2018
@author: root
"""
import cPickle as pkl
import numpy
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import skimage
import skimage.transform
import skimage.io
from PIL import Image, ImageEnhance
import scipy.misc
import tensorflow as tf
import numpy as np
import os
import struct
import scipy.io as sio
from array import array as pyarray
from numpy import array, int8, uint8, zeros
import collections
import pickle
import functools
import sets
from tensorflow.python.ops import rnn, array_ops
from tensorflow.contrib.rnn import GRUCell, DropoutWrapper, MultiRNNCell
from tensorflow.python import debug as tf_debug
from attn_7_1_ex import VariableSequenceClassification
from temp_createStruct5 import ConstructLookupTable
from time import gmtime, strftime
from logging_util import makelog
logfile=makelog()
class DataSet(object):
def __init__(self, layername, numMap):
"""Construct a DataSet."""
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_list.mat')
self._trainList = mat_contents['train_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_class.mat')
self._trainLabels = mat_contents['train_obs_class']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_list.mat')
self._testList = mat_contents['test_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_class.mat')
self._testLabels = mat_contents['test_obs_class']
self.layerextract = layername
self.numMap = numMap
self._num_examples = self._trainLabels.shape[0]
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
self._trainLabelsPerm = self._trainLabels[self._perm_list]
self._num_testexamples = self._testLabels.shape[0]
self._perm_list_test = np.arange(self._num_testexamples)
self._batch_seq = 0
self._epochs_completed = 0
self._index_in_epoch = 0
self._index_in_epoch_test = 0
self._max_seq = 0
self.Batch_Up_model = ConstructLookupTable()
        self.mydict2_test256 = self.Batch_Up_model.main(self._testList,2) # for train_testID != 1
self.feature_size_conv = self.numMap*14*14
self.feature_size_fc = 4096
def trainList(self):
return self._trainList
def trainLabels(self):
return self._trainLabels
def trainLabelsPerm(self):
return self._trainLabelsPerm
def testList(self):
return self._testList
def testLabels(self):
return self._testLabels
def num_examples(self):
return self._num_examples
def num_testexamples(self):
return self._num_testexamples
def epochs_completed(self):
return self._epochs_completed
def index_in_epoch(self):
return self._index_in_epoch
def max_seq(self):
return self._max_seq
def batch_seq(self):
return self._batch_seq
def PrepareTrainingBatch(self,Newpermbatch, batch_size, indicator):
if indicator == 1:
mydictG = self.Batch_Up_model.main(self._trainList,1) # for train_testID == 1
else:
mydictG = self.mydict2_test256
i = 0
temp = np.zeros(batch_size)
while i < batch_size:
temp[i] = len(mydictG[Newpermbatch[i]][1])
i = i + 1
self._max_seq = int(np.amax(temp))
self._batch_seq = temp
batch_conv = np.zeros([batch_size,self._max_seq,self.feature_size_conv])
batch_fc = np.zeros([batch_size,self._max_seq,self.feature_size_fc])
i = 0
while i < batch_size:
media_length = len(mydictG[Newpermbatch[i]][1])
j = 0
while j < media_length:
### for 256 image size for testing
# pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
# output = pickle.load(pkl_file1)
# pkl_file1.close()
#
# pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
# output2 = pickle.load(pkl_file2)
# pkl_file2.close()
#
# pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
# output3 = pickle.load(pkl_file3)
# pkl_file3.close()
#
# output.update(output2)
# output.update(output3)
# mat_contents = output[self.layerextract[0]]
# batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
#
# mat_contents = output[self.layerextract[1]]
## batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3_O'
# batch_fc[i][j][:] = mat_contents #'convfc7'
#
# j = j + 1
######################################################################
## for 384,512 image size for testing
if indicator == 1: # training ###################
pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
output = pickle.load(pkl_file1)
pkl_file1.close()
pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
output2 = pickle.load(pkl_file2)
pkl_file2.close()
pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
output3 = pickle.load(pkl_file3)
pkl_file3.close()
output.update(output2)
output.update(output3)
mat_contents = output[self.layerextract[0]]
batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
mat_contents = output[self.layerextract[1]]
batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_fc) #'conv5_3_O'
j = j + 1
else: # testing
pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
output = pickle.load(pkl_file1)
pkl_file1.close()
pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
output2 = pickle.load(pkl_file2)
pkl_file2.close()
output.update(output2)
mat_contents = output[self.layerextract[0]]
batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
mat_contents = output[self.layerextract[1]]
batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_fc) #'conv5_3_O'
j = j + 1
#########################################################
if indicator == 1:
J = np.arange(media_length)
np.random.shuffle(J)
temp_arr = batch_conv[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_conv[i,:media_length,:] = temp_arr
temp_arr = batch_fc[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_fc[i,:media_length,:] = temp_arr
i = i + 1
return batch_fc, batch_conv
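    # Shapes returned above (derived from the constructor settings): batch_fc
    # is (batch_size, max_seq, 4096) and batch_conv is
    # (batch_size, max_seq, numMap*14*14); both are zero-padded beyond each
    # observation's media count.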
def dense_to_one_hot(self,labels_dense, num_classes=1000):
labels_dense = labels_dense.astype(int)
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
labels_one_hot = labels_one_hot.astype(np.float32)
temp = zeros((labels_one_hot.shape[0],self._max_seq,num_classes))
i=0
while i < labels_one_hot.shape[0]:
temp[i][0:int(self._batch_seq[i])] = labels_one_hot[i]
i=i+1
return temp
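    # Sketch of the encoding above: with num_classes=3, labels [[0], [2]] and
    # self._batch_seq == [2, 1], the result has shape (2, max_seq, 3) and the
    # one-hot row of observation i is repeated for its first batch_seq[i]
    # timesteps.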
def next_batch(self,batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
self._trainLabelsPerm = self._trainLabels[self._perm_list]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self.PrepareTrainingBatch(self._perm_list[start:end], batch_size, 1), self.dense_to_one_hot(self._trainLabelsPerm[start:end])
def PrepareTestingBatch(self,test_total):
start = self._index_in_epoch_test
self._index_in_epoch_test += test_total
if self._index_in_epoch_test > self._num_testexamples:
start = 0
self._index_in_epoch_test = test_total
assert test_total <= self._num_testexamples
end = self._index_in_epoch_test
return self.PrepareTrainingBatch(self._perm_list_test[start:end], test_total, 0), self.dense_to_one_hot(self._testLabels[start:end])
## Testing
def Reset_index_in_epoch_test(self, init_v = 0):
self._index_in_epoch_test = init_v
def crop_image(self, x, target_height=224, target_width=224):
image = skimage.img_as_float(skimage.io.imread(x)).astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
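    # Note: crop_image resizes the shorter image side to the target size,
    # center-crops the longer side, and returns a float image of spatial size
    # (target_height, target_width).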
####### Network Parameters ########
training_iters = 10000000 # run 10000 epoch
batch_size = 15
display_step = 280 #280
test_num_total = 15
layername_conv = 'conv5_3'
layername_fc = 'fc7_final'
layername = [layername_conv, layername_fc]
numMap = 512
featMap = 14*14
num_classes = 1000
dropTrain = 0.5
dropTest = 1
prob_path = '/media/titanz/Data3TB/tensorboard_log/model_20180211/prob_256/'
savefigfile ='/media/titanz/Data3TB/tensorboard_log/model_20180309/attn_visual_imsz384/'
#################################
plantclefdata = DataSet(layername,numMap)
# tf Graph input
x = tf.placeholder("float", [None, None, 4096])
data = tf.placeholder("float", [None, None, numMap*14*14])
target = tf.placeholder("float", [None, None, num_classes])
dropout = tf.placeholder(tf.float32)
batch_size2 = tf.placeholder(tf.int32)
model = VariableSequenceClassification(x, data, target, dropout, batch_size2)
sess = tf.InteractiveSession()
#sess = tf.Session()
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
saver = tf.train.Saver(max_to_keep = None)
saver.restore(sess, "/media/titanz/Data3TB/tensorboard_log/model_20180309/model_55160")
#################################################################################
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_list.mat')
testList = mat_contents['test_obs_list']
mat_contents = sio.loadmat('/media/titanz/Data3TB/tensorboard_log/model_20180309/obs_tp_media_re.mat')
obs_tp_media_re = mat_contents['obs_tp_media_re']
imagesz = 1 # choose image size 1 = 256 , 2 = 384, 3 = 512
if imagesz == 1: #256
path_folder = '/media/titanz/Data3TB/PlantclefVGA2/PlantClefImageTest/resize_species/species/'
elif imagesz == 2: #384
path_folder = '/media/titanz/data3TB_02/PlantClefolddata_augmented/PlantClefImageTest_SR/resize_species_384/'
else: #512
path_folder = '/media/titanz/data3TB_02/PlantClefolddata_augmented/PlantClefImageTest_SR/resize_species_512/'
smooth = True
# read python dict back from the testing_256 file
pkl_file_test = open('/home/titanz/tensor_flow/tensorflow-master/tensorflow/examples/RNN/myfile_test_256.pkl', 'rb')
mydict2_test = pickle.load(pkl_file_test)
pkl_file_test.close()
mat_contents = sio.loadmat('/media/titanz/Data3TB/tensorflowlist/species/ClassID_CNNID.mat')
classIDList = mat_contents['ClassID_CNNID']
mediafolderTest_content = '/media/titanz/Data3TB/tensorflowlist/VGG_multipath_res_bn_lastconv/test_obs_media_content_256/'
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_class.mat')
testLabels = mat_contents['test_obs_class']
################################################################################
for count in xrange(obs_tp_media_re.shape[0]):
print('Obs num {:7.0f}'.format(count))
    ObsIDchosen = obs_tp_media_re[count][0].astype(int) - 1  # e.g. 8258, 12; choose 0 <= x < 13887
Obs_name = testList[ObsIDchosen].astype(int)
Obs_name = str(Obs_name).split('[')
Obs_name = Obs_name[1].split(']')
directory = savefigfile + str(Obs_name[0])
if not os.path.exists(directory):
os.makedirs(directory)
plantclefdata.Reset_index_in_epoch_test(init_v = ObsIDchosen)
(test_data_x, test_data_conv), test_label = plantclefdata.PrepareTestingBatch(test_num_total)
pred, alpha_forward1, alpha_forward2, alpha_forward3, alpha_backward1, alpha_backward2, alpha_backward3 = sess.run(model.alpha_list_com, feed_dict={x: test_data_x, data: test_data_conv, batch_size2: test_num_total, target: test_label, dropout: dropTest})#, batch_size: batch_size})
pred_re = pred[0,:,:]
B = np.argmax(pred_re,axis=1)
alpha_forward1 = np.array(alpha_forward1).swapaxes(1,0) # alpha(max_seq, batch, 196)
alpha_forward2 = np.array(alpha_forward2).swapaxes(1,0) # alpha(max_seq, batch, 196)
alpha_forward3 = np.array(alpha_forward3).swapaxes(1,0) # alpha(max_seq, batch, 196)
mat_contents2 = sio.loadmat(mediafolderTest_content + str(mydict2_test[ObsIDchosen][0]) + '.mat',mat_dtype=True)
used = mat_contents2['v']
alphas1 = np.array(alpha_forward1).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas1 = alphas1[0:used.shape[1],:,:]
alphas2 = np.array(alpha_forward2).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas2 = alphas2[0:used.shape[1],:,:]
alphas3 = np.array(alpha_forward3).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas3 = alphas3[0:used.shape[1],:,:]
pred_re = pred[0,:,:]
pred_re = pred_re[0:used.shape[1],:]
B = np.argmax(pred_re,axis=1)
B = B[0:used.shape[1]]
class_picken = testLabels[ObsIDchosen]
class_picken = class_picken.astype(int)
index_plot = 1;
index_plotnc = 1;
for ii in xrange(alphas1.shape[0]): # eg: 0,1,2 #list(range(0,alphas.shape[0]*2,2)):
organlabel = int(used[0,ii])
if organlabel == 0:
organlabelD = 'Branch'
elif organlabel == 1:
organlabelD = 'Entire'
elif organlabel == 2:
organlabelD = 'Flower'
elif organlabel == 3:
organlabelD = 'Fruit'
elif organlabel == 4:
organlabelD = 'Leaf'
elif organlabel == 5:
organlabelD = 'LeafScan'
else:
organlabelD = 'Stem'
plt.figure(1)
L = mydict2_test[ObsIDchosen][1][ii].split('/')
name_str = L[len(L)-1]
name_str2 = name_str.split('.mat')
name_str3 = name_str2[0]
path = path_folder + '{:04d}'.format(class_picken[0]) + '-' + str(classIDList[class_picken[0]][0]) +'/' + name_str3
img = plantclefdata.crop_image(path)
plt.imshow(img)
if smooth:
alpha_img = skimage.transform.pyramid_expand(alphas1[ii,0,:].reshape(14,14), upscale=16, sigma=20)
else:
alpha_img = skimage.transform.resize(alphas1[ii,0,:].reshape(14,14), [img.shape[0], img.shape[1]])
plt.imshow(alpha_img, alpha=0.7)
plt.set_cmap(cm.Greys_r)
plt.axis('off')
plt.savefig(directory + '/' + "course" + str(ii) + ".png")
plt.imshow(img)
plt.axis('off')
lab2 = organlabelD + '_'+ str(class_picken[0]) + '-' + str(B[ii])
plt.savefig(directory + '/' + lab2 + '_' + str(ii) + ".png")
plt.figure(2)
plt.imshow(img)
if smooth:
alpha_img = skimage.transform.pyramid_expand(alphas2[ii,0,:].reshape(14,14), upscale=16, sigma=20)
else:
alpha_img = skimage.transform.resize(alphas2[ii,0,:].reshape(14,14), [img.shape[0], img.shape[1]])
plt.imshow(alpha_img, alpha=0.7) # show attn
plt.set_cmap(cm.Greys_r)
plt.axis('off')
plt.savefig(directory + '/' + "fine" + str(ii) + ".png")
| bsd-3-clause |
CforED/Machine-Learning | sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
lin-credible/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
rhyolight/nupic.research | projects/sequence_prediction/continuous_sequence/run_elm.py | 10 | 8061 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
from optparse import OptionParser
from matplotlib import pyplot as plt
import numpy as np
from hpelm import ELM
from swarm_runner import SwarmRunner
from scipy import random
import pandas as pd
from htmresearch.support.sequence_learning_utils import *
from htmresearch.algorithms.online_extreme_learning_machine import OSELM
plt.ion()
def initializeELMnet(nDimInput, nDimOutput, numNeurons=10):
# Build ELM network with nDim input units,
# numNeurons hidden units (LSTM cells) and nDimOutput cells
# net = ELM(nDimInput, nDimOutput)
# net.add_neurons(numNeurons, "sigm")
net = OSELM(nDimInput, nDimOutput,
numHiddenNeurons=numNeurons, activationFunction='sig')
return net
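# Note: the commented lines above show the batch hpelm.ELM variant; this
# script uses the online-sequential OSELM implementation instead, so the
# network is initialized on the first nTrain samples and then updated
# sample-by-sample in the main loop below.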
def readDataSet(dataSet):
filePath = 'data/'+dataSet+'.csv'
# df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
# sequence = df['data']
if dataSet=='nyc_taxi':
df = pd.read_csv(filePath, header=0, skiprows=[1,2],
names=['time', 'data', 'timeofday', 'dayofweek'])
sequence = df['data']
dayofweek = df['dayofweek']
timeofday = df['timeofday']
seq = pd.DataFrame(np.array(pd.concat([sequence, timeofday, dayofweek], axis=1)),
columns=['data', 'timeofday', 'dayofweek'])
elif dataSet=='sine':
df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
sequence = df['data']
seq = pd.DataFrame(np.array(sequence), columns=['data'])
else:
        raise ValueError('unrecognized dataset type')
return seq
def getTimeEmbeddedMatrix(sequence, numLags=100, predictionStep=1,
useTimeOfDay=True, useDayOfWeek=True):
print "generate time embedded matrix "
print "the training data contains ", str(nTrain-predictionStep), "records"
inDim = numLags + int(useTimeOfDay) + int(useDayOfWeek)
if useTimeOfDay:
print "include time of day as input field"
if useDayOfWeek:
print "include day of week as input field"
X = np.zeros(shape=(len(sequence), inDim))
T = np.zeros(shape=(len(sequence), 1))
for i in xrange(numLags-1, len(sequence)-predictionStep):
if useTimeOfDay and useDayOfWeek:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['timeofday'][i]]),
np.array([sequence['dayofweek'][i]])])
elif useTimeOfDay:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['timeofday'][i]])])
elif useDayOfWeek:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['dayofweek'][i]])])
else:
sample = np.array(sequence['data'][(i-numLags+1):(i+1)])
X[i, :] = sample
T[i, :] = sequence['data'][i+predictionStep]
return (X, T)
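# Layout produced above (illustrative): with numLags=100 and both the
# time-of-day and day-of-week flags enabled, row i of X is
# [data[i-99], ..., data[i], timeofday[i], dayofweek[i]] (102 columns) and
# T[i] holds data[i + predictionStep].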
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")
# parser.add_option("-n",
# "--predictionstep",
# type=int,
# default=1,
# dest="predictionstep",
# help="number of steps ahead to be predicted")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def saveResultToFile(dataSet, predictedInput, algorithmName):
inputFileName = 'data/' + dataSet + '.csv'
inputFile = open(inputFileName, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
outputFileName = './prediction/' + dataSet + '_' + algorithmName + '_pred.csv'
outputFile = open(outputFileName, "w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(
['timestamp', 'data', 'prediction-' + str(predictionStep) + 'step'])
csvWriter.writerow(['datetime', 'float', 'float'])
csvWriter.writerow(['', '', ''])
for i in xrange(len(sequence)):
row = csvReader.next()
csvWriter.writerow([row[0], row[1], predictedInput[i]])
inputFile.close()
outputFile.close()
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
print "run ELM on ", dataSet
SWARM_CONFIG = SwarmRunner.importSwarmDescription(dataSet)
predictedField = SWARM_CONFIG['inferenceArgs']['predictedField']
nTrain = SWARM_CONFIG["streamDef"]['streams'][0]['last_record']
predictionStep = SWARM_CONFIG['inferenceArgs']['predictionSteps'][0]
useTimeOfDay = True
useDayOfWeek = True
nTrain = 500
numLags = 100
# prepare dataset as pyBrain sequential dataset
sequence = readDataSet(dataSet)
# standardize data by subtracting mean and dividing by std
meanSeq = np.mean(sequence['data'])
stdSeq = np.std(sequence['data'])
sequence['data'] = (sequence['data'] - meanSeq)/stdSeq
meanTimeOfDay = np.mean(sequence['timeofday'])
stdTimeOfDay = np.std(sequence['timeofday'])
sequence['timeofday'] = (sequence['timeofday'] - meanTimeOfDay)/stdTimeOfDay
meanDayOfWeek = np.mean(sequence['dayofweek'])
stdDayOfWeek = np.std(sequence['dayofweek'])
sequence['dayofweek'] = (sequence['dayofweek'] - meanDayOfWeek)/stdDayOfWeek
(X, T) = getTimeEmbeddedMatrix(sequence, numLags, predictionStep,
useTimeOfDay, useDayOfWeek)
random.seed(6)
net = initializeELMnet(nDimInput=X.shape[1],
nDimOutput=1, numNeurons=50)
net.initializePhase(X[:nTrain, :], T[:nTrain, :])
predictedInput = np.zeros((len(sequence),))
targetInput = np.zeros((len(sequence),))
trueData = np.zeros((len(sequence),))
for i in xrange(nTrain, len(sequence)-predictionStep):
net.train(X[[i], :], T[[i], :])
Y = net.predict(X[[i], :])
predictedInput[i] = Y[-1]
targetInput[i] = sequence['data'][i+predictionStep]
trueData[i] = sequence['data'][i]
print "Iteration {} target input {:2.2f} predicted Input {:2.2f} ".format(
i, targetInput[i], predictedInput[i])
predictedInput = (predictedInput * stdSeq) + meanSeq
targetInput = (targetInput * stdSeq) + meanSeq
trueData = (trueData * stdSeq) + meanSeq
saveResultToFile(dataSet, predictedInput, 'elm')
plt.figure()
plt.plot(targetInput)
plt.plot(predictedInput)
plt.xlim([12800, 13500])
plt.ylim([0, 30000])
skipTrain = 6000
from plot import computeSquareDeviation
squareDeviation = computeSquareDeviation(predictedInput, targetInput)
squareDeviation[:skipTrain] = None
nrmse = np.sqrt(np.nanmean(squareDeviation)) / np.nanstd(targetInput)
print "NRMSE {}".format(nrmse) | gpl-3.0 |
joshloyal/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
skggm/skggm | examples/trace_plot_example.py | 1 | 3138 | """
Visualize Regularization Path
=============================
Plot the edge level coefficients (inverse covariance entries)
as a function of the regularization parameter.
"""
import sys
import numpy as np
from sklearn.datasets import make_sparse_spd_matrix
sys.path.append("..")
from inverse_covariance import QuicGraphicalLasso
from inverse_covariance.plot_util import trace_plot
from inverse_covariance.profiling import LatticeGraph
def make_data(n_samples, n_features):
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng
)
cov = np.linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
return X, cov, prec
def make_data_banded(n_samples, n_features):
alpha = 0.1
cov, prec, adj = LatticeGraph(
n_blocks=2, random_sign=True, chain_blocks=True, seed=1
).create(n_features, alpha)
prng = np.random.RandomState(2)
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
return X, cov, prec
def show_quic_coefficient_trace(X):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=20)
def show_quic_coefficient_trace_truth(X, truth):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=6, ground_truth=truth)
if __name__ == "__main__":
# example 1
n_samples = 10
n_features = 5
X, cov, prec = make_data(n_samples, n_features)
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=10")
print(" n_features=5")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=100")
print(" n_features=5")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
# example 2
n_samples = 110
n_features = 100
X, cov, prec = make_data_banded(n_samples, n_features)
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
| mit |
niun/pyoscope | tests/display_wr.py | 2 | 1164 | #!/usr/bin/env python
#
# PyUSBtmc
# display_channel.py
#
# Copyright (c) 2011 Mike Hadmack
# Copyright (c) 2010 Matt Mets
# This code is distributed under the MIT license
#
# This script is just to test waverunner functionality as a module
#
import numpy
from matplotlib import pyplot
import sys
import os
sys.path.append(os.path.expanduser('.'))
from waverunner import Waverunner
""" Example program to plot the Y-T data from one scope channel
derived from capture_channel_1.py but using new interface methods """
# Initialize our scope
scope = Waverunner("127.0.0.1")
scope.grabData()
data1 = scope.getScaledWaveform(1)
data2 = scope.getScaledWaveform(2)
# Now, generate a time axis.
time = scope.getTimeAxis()
# See if we should use a different time axis
if (time[599] < 1e-3):
time = time * 1e6
tUnit = "uS"
elif (time[599] < 1):
time = time * 1e3
tUnit = "mS"
else:
tUnit = "S"
# close interface
scope.close()
# Plot the data
pyplot.plot(time,data1)
pyplot.plot(time,data2)
pyplot.title("Oscilloscope Data")
pyplot.ylabel("Voltage (V)")
pyplot.xlabel("Time (" + tUnit + ")")
pyplot.xlim(time[0], time[599])
pyplot.show()
| mit |
rgommers/numpy | numpy/core/numeric.py | 7 | 76727 | import functools
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
asfortranarray, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
from . import overrides
from . import umath
from . import shape_base
from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.zeros_like(y)
array([0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
    # needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty : Return a new uninitialized array.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
if like is not None:
return _ones_with_like(shape, dtype=dtype, order=order, like=like)
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
_ones_with_like = array_function_dispatch(
_ones_dispatcher
)(ones)
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.ones_like(y)
array([1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
        The desired data-type for the array. The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
_full_with_like = array_function_dispatch(
_full_dispatcher
)(full)
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
keepdims : bool, optional
If this is set to True, the axes that are counted are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.19.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> a = np.array([[0, 1, 7, 0],
... [3, 0, 2, 19]])
>>> np.count_nonzero(a)
5
>>> np.count_nonzero(a, axis=0)
array([1, 1, 2, 1])
>>> np.count_nonzero(a, axis=1)
array([2, 3])
>>> np.count_nonzero(a, axis=1, keepdims=True)
array([[2],
[3]])
"""
if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
# TODO: this works around .astype(bool) not working properly (gh-9847)
if np.issubdtype(a.dtype, np.character):
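        # For string/bytes dtypes, astype(bool) is unreliable (see gh-9847 above),
        # so compare against the dtype's empty scalar instead: empty strings count
        # as "zero", everything else as non-zero.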
a_bool = a != a.dtype.type()
else:
a_bool = a.astype(np.bool_, copy=False)
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
@set_module('numpy')
def isfortran(a):
"""
Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Returns
-------
isfortran : bool
Returns True if the array is Fortran contiguous but *not* C contiguous.
Examples
--------
    np.array allows one to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
def _argwhere_dispatcher(a):
return (a,)
@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
non-zero items.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
# nonzero does not behave well on 0d, so promote to 1d
if np.ndim(a) == 0:
a = shape_base.atleast_1d(a)
# then remove the added dimension
return argwhere(a)[:,:0]
return transpose(nonzero(a))
def _flatnonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return np.nonzero(np.ravel(a))[0]
def _correlate_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
    `numpy.correlate` may perform slowly in large arrays (e.g. n = 1e5) because it does
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
be preferable.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
return multiarray.correlate2(a, v, mode)
def _convolve_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
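    # Convolution is correlation with the second sequence reversed; the
    # low-level (non-conjugating) correlate routine does the actual work.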
return multiarray.correlate(a, v[::-1], mode)
def _outer_dispatcher(a, b, out=None):
return (a, b, out)
@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
      [[a0*b0  a0*b1 ... a0*bN ]
       [a1*b0    .
       [ ...          .
       [aM*b0            aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to dimensions other than 1D and other
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
is the equivalent.
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
def _tensordot_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing
two array_like objects, ``(a_axes, b_axes)``, sum the products of
`a`'s and `b`'s elements (components) over the axes specified by
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
        second to `b`. Both sequences must be of the same length.
Returns
-------
output : ndarray
The tensor dot product of the input.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
The shape of the result consists of the non-contracted axes of the
first tensor, followed by the non-contracted axes of the second.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]])
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([['a', 'b'],
['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
array([[['acc', 'bdd'],
['aaacccc', 'bbbdddd']],
[['aaaaacccccc', 'bbbbbdddddd'],
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[['a', 'b'],
['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
array([[['abbbbb', 'cddddd'],
['aabbbbbb', 'ccdddddd']],
[['aaabbbbbbb', 'cccddddddd'],
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[['abb', 'cdd'],
['aaabbbb', 'cccdddd']],
[['aaaaabbbbbb', 'cccccdddddd'],
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
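    # With the contracted axes gathered at the end of `a` and the front of `b`,
    # the tensor contraction reduces to a single 2-D matrix product; the final
    # reshape below restores the free (non-contracted) axes of both inputs.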
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
def _roll_dispatcher(a, shift, axis=None):
return (a,)
@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, -1)
array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
>>> np.roll(x2, -1, axis=1)
array([[1, 2, 3, 4, 0],
[6, 7, 8, 9, 5]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
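        # Accumulate the requested shift per axis (an axis may appear more than
        # once since duplicates are allowed above); each non-zero shift is then
        # realized below as a pair of slice copies implementing the cyclic move.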
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
def _rollaxis_dispatcher(a, axis, start=None):
return (a,)
@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
This function continues to be supported for backward compatibility, but you
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
1.11.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
When ``start <= axis``, the axis is rolled back until it lies in
this position. When ``start > axis``, the axis is rolled until it
lies before this position. The default, 0, results in a "complete"
roll. The following table describes how negative values of ``start``
are interpreted:
.. table::
:align: left
+-------------------+----------------------+
| ``start`` | Normalized ``start`` |
+===================+======================+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
+-------------------+----------------------+
| ``-arr.ndim`` | 0 |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``-1`` | ``arr.ndim-1`` |
+-------------------+----------------------+
| ``0`` | ``0`` |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``arr.ndim`` | ``arr.ndim`` |
+-------------------+----------------------+
| ``arr.ndim + 1`` | raise ``AxisError`` |
+-------------------+----------------------+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
def _moveaxis_dispatcher(a, source, destination):
return (a,)
@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose : Permute the dimensions of an array.
swapaxes : Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
return (a, b)
@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return moveaxis(cp, -1, axisc)
little_endian = (sys.byteorder == 'little')
@set_module('numpy')
def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
    The subarrays ``grid[k]`` contain the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]])
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
if sparse:
res = tuple()
else:
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
if sparse:
res = res + (idx,)
else:
res[i] = idx
return res
def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
if like is not None:
return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
_fromfunction_with_like = array_function_dispatch(
_fromfunction_dispatcher
)(fromfunction)
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@set_module('numpy')
def isscalar(element):
"""
Returns True if the type of `element` is a scalar type.
Parameters
----------
element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `element` is a scalar type, False if it is not.
See Also
--------
ndim : Get the number of dimensions of an array
Notes
-----
If you need a stricter way to identify a *numerical* scalar, use
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
non-numerical elements such as strings.
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
as that will also return true for 0d arrays. This is how numpy overloads
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
+======================================+===============+===================+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
| builtins) | | |
+--------------------------------------+---------------+-------------------+
| builtin string and buffer objects | ``True`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other builtin objects, like | ``False`` | ``True`` |
| `pathlib.Path`, `Exception`, | | |
| the result of `re.compile` | | |
+--------------------------------------+---------------+-------------------+
| third-party objects like | ``False`` | ``True`` |
| `matplotlib.figure.Figure` | | |
+--------------------------------------+---------------+-------------------+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other numpy arrays | ``False`` | ``False`` |
+--------------------------------------+---------------+-------------------+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
| objects | | |
+--------------------------------------+---------------+-------------------+
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
>>> np.isscalar('numpy')
True
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
>>> np.isscalar(Number())
True
"""
return (isinstance(element, generic)
or type(element) in ScalarType
or isinstance(element, numbers.Number))
@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
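            # Two's complement with respect to the minimal width: adding
            # 2**(poswidth + 1) to the (negative) number yields the positive
            # integer whose binary digits are the desired bit pattern; the
            # result is then left-padded with '1' bits up to the output width.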
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
def _identity_dispatcher(n, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if like is not None:
return _identity_with_like(n, dtype=dtype, like=like)
from numpy import eye
return eye(n, dtype=dtype, like=like)
_identity_with_like = array_function_dispatch(
_identity_dispatcher
)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any, equal
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
The comparison of `a` and `b` uses standard broadcasting, which
means that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for
`equal` but not `array_equal`.
`allclose` is not defined for non-numeric data types.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
math.isclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero value
for `atol` will result in `False` if either `a` or `b` is zero.
`isclose` is not defined for non-numeric data types.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta.
# Although, the default tolerances are unlikely to be useful
if y.dtype.kind != "m":
dt = multiarray.result_type(y, 1.)
y = asanyarray(y, dtype=dt)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
# Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
return cond[()] # Flatten 0d arrays to scalars
def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
.. versionadded:: 1.19.0
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
def _array_equiv_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
multiarray.broadcast(a1, a2)
except Exception:
return False
return bool(asarray(a1 == a2).all())
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
def extend_all(module):
existing = set(__all__)
mall = getattr(module, '__all__')
for a in mall:
if a not in existing:
__all__.append(a)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
| bsd-3-clause |
clairetang6/bokeh | bokeh/charts/builders/bar_builder.py | 5 | 12416 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Bar class, which lets you build your Bar charts by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
from ..builder import Builder, create_and_build
from ...models import FactorRange, Range1d
from ..glyphs import BarGlyph
from ...core.properties import Float, Enum, Bool, Override
from ..properties import Dimension
from ..attributes import ColorAttr, CatAttr
from ..operations import Stack, Dodge
from ...core.enums import Aggregation
from ..stats import stats
from ...models.sources import ColumnDataSource
from ..utils import help
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
class BarBuilder(Builder):
"""This is the Bar builder and it is in charge of plotting
    Bar charts (grouped and stacked) in an easy and intuitive way.
Essentially, it utilizes a standardized way to ingest the data,
make the proper calculations and generate renderers. The renderers
reference the transformed data, which represent the groups of data
that were derived from the inputs. We additionally make calculations
for the ranges.
The x_range is categorical, and is made either from the label argument
or from the `pandas.DataFrame.index`. The y_range can be supplied as the
parameter continuous_range, or will be calculated as a linear range
(Range1d) based on the supplied values.
The bar builder can also be used as a base class for other builders
that perform some aggregation across derived groups of data.
"""
# ToDo: add label back as a discrete dimension
values = Dimension('values')
dimensions = ['values']
# req_dimensions = [['values']]
default_attributes = {'label': CatAttr(),
'color': ColorAttr(),
'line_color': ColorAttr(default='white'),
'stack': CatAttr(),
'group': CatAttr()}
agg = Enum(Aggregation, default='sum')
max_height = Float(1.0)
min_height = Float(0.0)
bar_width = Float(default=0.8)
fill_alpha = Float(default=0.8)
glyph = BarGlyph
comp_glyph_types = Override(default=[BarGlyph])
label_attributes = ['stack', 'group']
label_only = Bool(False)
values_only = Bool(False)
_perform_stack = False
_perform_group = False
def setup(self):
if self.attributes['color'].columns is None:
if self.attributes['stack'].columns is not None:
self.attributes['color'].setup(columns=self.attributes['stack'].columns)
if self.attributes['group'].columns is not None:
self.attributes['color'].setup(columns=self.attributes['group'].columns)
if self.attributes['stack'].columns is not None:
self._perform_stack = True
if self.attributes['group'].columns is not None:
self._perform_group = True
# ToDo: perform aggregation validation
# Not given values kw, so using only categorical data
if self.values.dtype.name == 'object' and len(self.attribute_columns) == 0:
# agg must be count
self.agg = 'count'
self.attributes['label'].set_columns(self.values.selection)
else:
pass
self._apply_inferred_index()
if self.xlabel is None:
if self.attributes['label'].columns is not None:
self.xlabel = str(
', '.join(self.attributes['label'].columns).title()).title()
else:
self.xlabel = self.values.selection
if self.ylabel is None:
if not self.label_only:
self.ylabel = '%s( %s )' % (
self.agg.title(), str(self.values.selection).title())
else:
self.ylabel = '%s( %s )' % (
self.agg.title(), ', '.join(self.attributes['label'].columns).title())
def _apply_inferred_index(self):
"""Configure chart when labels are provided as index instead of as kwarg."""
# try to infer grouping vs stacking labels
if (self.attributes['label'].columns is None and
self.values.selection is not None):
if self.attributes['stack'].columns is not None:
special_column = 'unity'
else:
special_column = 'index'
self._data['label'] = special_column
self.attributes['label'].setup(data=ColumnDataSource(self._data.df),
columns=special_column)
self.xlabel = ''
def set_ranges(self):
"""Push the Bar data into the ColumnDataSource and calculate
the proper ranges.
"""
x_items = self.attributes['label'].items
if x_items is None:
x_items = ''
x_labels = []
# Items are identified by tuples. If the tuple has a single value,
# we unpack it
for item in x_items:
item = self._get_label(item)
x_labels.append(str(item))
self.x_range = FactorRange(factors=x_labels)
y_shift = abs(0.1 * ((self.min_height + self.max_height) / 2))
if self.min_height < 0:
start = self.min_height - y_shift
else:
start = 0.0
if self.max_height > 0:
end = self.max_height + y_shift
else:
end = 0.0
self.y_range = Range1d(start=start, end=end)
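# Editorial worked example (hedged, not in the original source): for
# min_height = 0.0 and max_height = 40.0 the padding above gives
#
#     y_shift = abs(0.1 * ((0.0 + 40.0) / 2))   # == 2.0
#     start, end = 0.0, 40.0 + y_shift          # -> Range1d(start=0.0, end=42.0)
#
# a negative min_height would likewise be padded downward by the same shift.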
def get_extra_args(self):
if self.__class__ is not BarBuilder:
attrs = self.properties(with_bases=False)
return {attr: getattr(self, attr) for attr in attrs}
else:
return {}
def yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the ColumnDataSource.
"""
kwargs = self.get_extra_args()
attrs = self.collect_attr_kwargs()
for group in self._data.groupby(**self.attributes):
glyph_kwargs = self.get_group_kwargs(group, attrs)
group_kwargs = kwargs.copy()
group_kwargs.update(glyph_kwargs)
props = self.glyph.properties().difference(set(['label']))
# make sure we always pass the color and line color
for k in ['color', 'line_color']:
group_kwargs[k] = group[k]
# TODO(fpliger): we shouldn't need to do this to ensure we don't
# have extra kwargs... this is needed now because
# of label, group and stack being "special"
for k in set(group_kwargs):
if k not in props:
group_kwargs.pop(k)
bg = self.glyph(label=group.label,
x_label=self._get_label(group['label']),
values=group.data[self.values.selection].values,
agg=stats[self.agg](),
width=self.bar_width,
fill_alpha=self.fill_alpha,
stack_label=self._get_label(group['stack']),
dodge_label=self._get_label(group['group']),
**group_kwargs)
self.add_glyph(group, bg)
if self._perform_stack:
Stack().apply(self.comp_glyphs)
if self._perform_group:
Dodge().apply(self.comp_glyphs)
# track the overall max/min bar heights across all glyphs so set_ranges can pad the y_range
self.max_height = max([renderer.y_max for renderer in self.comp_glyphs])
self.min_height = min([renderer.y_min for renderer in self.comp_glyphs])
for renderer in self.comp_glyphs:
for sub_renderer in renderer.renderers:
yield sub_renderer
@help(BarBuilder)
def Bar(data, label=None, values=None, color=None, stack=None, group=None, agg="sum",
xscale="categorical", yscale="linear", xgrid=False, ygrid=True,
continuous_range=None, **kw):
""" Create a Bar chart using :class:`BarBuilder <bokeh.charts.builders.bar_builder.BarBuilder>`
render the geometry from values, cat and stacked.
Args:
data (:ref:`userguide_charts_data_types`): the data
source for the chart.
label (list(str) or str, optional): list of strings representing the categories.
(Defaults to None)
values (str, optional): iterable 2d representing the data series
values matrix.
color (str or list(str) or `~bokeh.charts._attributes.ColorAttr`): string color,
string column name, list of string columns or a custom `ColorAttr`,
which replaces the default `ColorAttr` for the builder.
stack (list(str) or str, optional): columns to use for stacking.
(Defaults to None, so grouping is assumed)
group (list(str) or str, optional): columns to use for grouping.
agg (str): how to aggregate the `values`. (Defaults to 'sum'; if only label is
provided, a 'count' is performed instead.)
continuous_range(Range1d, optional): Custom continuous_range to be
used. (Defaults to None)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
Returns:
:class:`Chart`: includes glyph renderers that generate bars
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Bar, output_file, show, hplot
# best support is with data in a format that is table-like
data = {
'sample': ['1st', '2nd', '1st', '2nd', '1st', '2nd'],
'interpreter': ['python', 'python', 'pypy', 'pypy', 'jython', 'jython'],
'timing': [-2, 5, 12, 40, 22, 30]
}
# x-axis labels pulled from the interpreter column, stacking labels from sample column
bar = Bar(data, values='timing', label='interpreter', stack='sample', agg='mean',
title="Python Interpreter Sampling", legend='top_right', plot_width=400)
# table-like data results in reconfiguration of the chart with no data manipulation
bar2 = Bar(data, values='timing', label=['interpreter', 'sample'],
agg='mean', title="Python Interpreters", plot_width=400)
output_file("stacked_bar.html")
show(hplot(bar, bar2))
"""
if continuous_range and not isinstance(continuous_range, Range1d):
raise ValueError(
"continuous_range must be an instance of bokeh.models.ranges.Range1d"
)
if label is not None and values is None:
kw['label_only'] = True
if (agg == 'sum') or (agg == 'mean'):
agg = 'count'
values = label
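# Editorial usage note (hedged, not from the original source): when only
# `label` is supplied, the branch above silently switches the aggregation to
# a count of occurrences, so a call like
#
#     Bar(df, label='interpreter')
#
# behaves roughly like passing values='interpreter' with agg='count'
# (df is a hypothetical DataFrame with an 'interpreter' column).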
# The continuous_range is the y_range (until we implement HBar charts)
y_range = continuous_range
kw['label'] = label
kw['values'] = values
kw['color'] = color
kw['stack'] = stack
kw['group'] = group
kw['agg'] = agg
kw['xscale'] = xscale
kw['yscale'] = yscale
kw['xgrid'] = xgrid
kw['ygrid'] = ygrid
kw['y_range'] = y_range
chart = create_and_build(BarBuilder, data, **kw)
# hide x labels if there is a single value, implying stacking only
if len(chart.x_range.factors) == 1 and not label:
chart.below[0].visible = False
return chart
| bsd-3-clause |
Balandat/cont_no_regret | old_code/testing.py | 1 | 3136 | '''
Created on Feb 24, 2015
@author: balandat
'''
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
from ContNoRegret.Domains import S
from ContNoRegret.Distributions import Uniform
from ContNoRegret.utils import create_random_Sigmas
from ContNoRegret.LossFunctions import GaussianLossFunction
from scipy.stats import expon
from scipy.interpolate import SmoothBivariateSpline, LSQBivariateSpline
# def compute_constants(gamma):
# c = (gamma-1)**(-1)
# a2 = gamma*(1+gamma)/2
# a1 = gamma - 2*c*a2
# a0 = 1 - c*a1 - c**2*a2
# return c, np.array([a0, a1, a2])
#
# def phi(u, gamma):
# c,a = compute_constants(gamma)
# return ( (u<c)*(gamma/(gamma-1)-np.minimum(u,c))**(-gamma) +
# (u>=c)*(a[0]+a[1]*np.maximum(u,c)+a[2]*np.maximum(u,c)**2) )
#
# def phi_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(gamma/(gamma-1)-np.minimum(u,c))**(-(1+gamma)) + (u>=c)*(a[1]+2*a[2]*np.maximum(u,c))
#
# def phi_double_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(1+gamma)*(gamma/(gamma-1)-np.minimum(u,c))**(-(2+gamma)) + (u>=c)*2*a[2]
#
# def phi_inv(u, gamma):
# c,a = compute_constants(gamma)
# b = phi(c, gamma)
# return ( (u<b)*(gamma/(gamma-1)-np.minimum(u,b)**(-1/gamma)) +
# (u>=b)*(-a[1]/2/a[2]+np.sqrt(a[1]**2/4/a[2]**2 - (a[0]-np.maximum(u,b))/a[2])) )
#
# def phi_inv_prime(u, gamma):
# return 1/phi_prime(phi_inv(u, gamma))
#
#
# # Plot some functions
# gammas = [1.25, 1.5, 1.75, 2, 3]
# u = np.linspace(-1.5,5,10000)
# v = np.linspace(0.001,10,10000)
# f,axs = plt.subplots(3,1)
# axs[0].plot(u, np.exp(u-1))
# axs[1].plot(u, np.exp(u-1))
# axs[2].plot(u, np.exp(u-1))
# for gamma in gammas:
# axs[0].plot(u, phi(u,gamma))
# axs[1].plot(u, phi_prime(u,gamma))
# axs[2].plot(u, phi_double_prime(u,gamma))
# plt.show()
# for gamma in gammas:
# # gamma = 1.5
# ctilde = gamma/(gamma-1)
# a2 = 0.5*gamma*(1+gamma)/((ctilde-1)**(2+gamma))
# a1 = gamma/((ctilde-1)**(1+gamma)) - 2*a2
# a0 = 1/((ctilde-1)**gamma) - a1 - a2
#
# def phi(u):
# return (u<1)*(ctilde-np.minimum(u,1))**(-gamma) + (u>=1)*(a0+a1*np.maximum(u,1)+a2*np.maximum(u,1)**2)
#
# def phiprime(u):
# return (u<1)*gamma*(ctilde-np.minimum(u,1))**(-(1+gamma)) + (u>=1)*(a1+2*a2*np.maximum(u,1))
#
# def phiinv(u):
# return (u<1)*(ctilde-np.minimum(u,1)**(-1/gamma)) + (u>=1)*(-a1/2/a2+np.sqrt(a1**2/4/a2**2 - (a0-np.maximum(u,1))/a2))
#
# def phiinvprime(u):
# return 1/phiprime(phiinv(u))
# # return (u<1)/gamma*u**(-1+1/gamma) + (u>=1)*(a1**2-4*a2*(a0-np.maximum(u,1)))**(-1/2)
#
#
# # fig2, (ax2, ax3) = plt.subplots(2, 1)
# # fig3, ax4 = plt.subplots(1)
#
# ax1.plot(u, phi(u))#, u, np.exp(u-1))
# # v = np.linspace(0.001, 5, 10000)
# # ax2.plot(v, phiinv(v), v, 1+np.log(v))
# # ax3.plot(v, phiinvprime(v), v, 1/v)
# # ax4.plot(v, phiinvprime(v)-1/(3*v))
# # print(np.min(phiinvprime(v)-1/(3+v)))
# plt.show()
| mit |
mjudsp/Tsallis | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
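# Editorial aside (hedged sketch, not part of the original example): RobustScaler
# centers each feature on its median and scales by the interquartile range, so
# the single outlier injected above barely shifts the statistics. Roughly:
#
#     center = np.median(X_train, axis=0)
#     iqr = (np.percentile(X_train, 75, axis=0)
#            - np.percentile(X_train, 25, axis=0))
#     Xtr_r_manual = (X_train - center) / iqr   # ~= robust_scaler.transform(X_train)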
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
shusenl/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.19/_downloads/33b5e3cff5c172d72c79c6eec192b031/plot_label_from_stc.py | 20 | 4093 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <luke.bloy@gmail.com>
# Alex Gramfort <alexandre.gramfort@inria.fr>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
# 8.5% of original source space vertices were omitted during forward
# calculation, suppress the warning here with verbose='error'
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True,
verbose='error')
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
hfut721/RPN | tools/demo.py | 10 | 5028 | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
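# Editorial note (hedged, not from the original repo): each row of `dets` above
# is [x1, y1, x2, y2, score]; nms() keeps the highest-scoring box and discards
# any remaining box whose overlap (IoU) with a kept box exceeds NMS_THRESH.
# A rough sketch with made-up boxes:
#
#     dets = np.array([[10., 10., 50., 50., 0.9],
#                      [12., 12., 52., 52., 0.8],      # heavy overlap with box 0
#                      [100., 100., 150., 150., 0.7]])
#     # nms(dets, 0.3) would be expected to keep indices [0, 2] here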
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| mit |