repo_name | path | copies | size | content | license
---|---|---|---|---|---|
PTDreamer/dRonin | python/ins/cins.py | 11 | 3838 |
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import ins
# This is the set of currently recommended INS settings, modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var=numpy.array([1e-3,1e-2,10])
class CINS:
GRAV = 9.805
def __init__(self):
""" Creates the CINS class.
Important variables are
* X - the vector of state variables
* Xd - the vector of state derivatives for state and inputs
* Y - the vector of outputs for current state value
"""
self.state = []
def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
""" configure the INS parameters """
if mag_var is not None:
ins.configure(mag_var=mag_var)
if gyro_var is not None:
ins.configure(gyro_var=gyro_var)
if accel_var is not None:
ins.configure(accel_var=accel_var)
if baro_var is not None:
ins.configure(baro_var=baro_var)
if gps_var is not None:
ins.configure(gps_var=gps_var)
def prepare(self):
""" prepare the C INS wrapper
"""
self.state = ins.init()
self.configure(
mag_var=default_mag_var,
gyro_var=default_gyro_var,
accel_var=default_accel_var,
baro_var=default_baro_var,
gps_var=default_gps_var
)
def predict(self, gyros, accels, dT = 1.0/666.0):
""" Perform the prediction step
"""
self.state = ins.prediction(gyros, accels, dT)
def correction(self, pos=None, vel=None, mag=None, baro=None):
""" Perform the INS correction based on the provided corrections
"""
sensors = 0
Z = numpy.zeros((10,),numpy.float64)
# the masks must match the values in insgps.h
if pos is not None:
sensors = sensors | 0x0003
Z[0] = pos[0]
Z[1] = pos[1]
if vel is not None:
sensors = sensors | 0x0038
Z[3] = vel[0]
Z[4] = vel[1]
Z[5] = vel[2]
if mag is not None:
sensors = sensors | 0x01C0
Z[6] = mag[0]
Z[7] = mag[1]
Z[8] = mag[2]
if baro is not None:
sensors = sensors | 0x0200
Z[9] = baro
self.state = ins.correction(Z, sensors)
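# A quick sketch of how the mask composes (bit values as set above, assumed
# to mirror insgps.h): POS -> 0x0003, VEL -> 0x0038, MAG -> 0x01C0,
# BARO -> 0x0200.  A combined position + baro update would therefore pass
#   sensors = 0x0003 | 0x0200   # == 0x0203
# with Z[0], Z[1] and Z[9] filled in.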
def test():
""" test the INS with simulated data
"""
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
sim = CINS()
sim.prepare()
dT = 1.0 / 666.0
STEPS = 100000
history = numpy.zeros((STEPS,16))
history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
ROLL = 0.1
YAW = 0.2
sim.predict(gyros=[0, 0, YAW], accels=[0, CINS.GRAV*sin(ROLL), -CINS.GRAV*cos(ROLL)], dT=dT)
history[k,:] = sim.state
history_rpy[k,:] = quat_rpy(sim.state[6:10])
times[k] = k * dT
angle = 0*numpy.pi/3 + YAW * dT * k # radians
height = 1.0 * k * dT
if True and k % 60 == 59:
sim.correction(pos=[[10],[5],[-height]])
if True and k % 60 == 59:
sim.correction(vel=[[0],[0],[-1]])
if k % 20 == 8:
sim.correction(baro=[height])
if True and k % 20 == 15:
sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])
if k % 1000 == 0:
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],history[0:k:4,0:3])
ax[0][0].set_title('Position')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.draw()
fig.show()
plt.show()
if __name__ =='__main__':
test() | gpl-3.0 |
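A minimal usage sketch of the wrapper above, assuming the compiled `ins` extension module is importable and the file is importable as `cins`; `sensor_stream` is a hypothetical source of gyro/accel samples:

from cins import CINS

filt = CINS()
filt.prepare()                       # init the C filter and push the default variances
dT = 1.0 / 666.0                     # sample period used throughout the file above
for gyros, accels in sensor_stream:  # hypothetical iterable of rad/s and m/s^2 triples
    filt.predict(gyros, accels, dT)
filt.correction(baro=[0.0])          # fuse a barometric altitude once one is available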
q1ang/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than that of FA in this case. However, PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from "Automatic Choice of Dimensionality for PCA"
(Thomas P. Minka, NIPS 2000: 598-604) is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
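# The two imports above use the legacy module paths, which were removed in
# scikit-learn 0.20; the same utilities now live in sklearn.model_selection.
# A version-tolerant sketch that would replace the two lines above:
#
#     try:
#         from sklearn.model_selection import cross_val_score, GridSearchCV
#     except ImportError:  # older scikit-learn
#         from sklearn.cross_validation import cross_val_score
#         from sklearn.grid_search import GridSearchCV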
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
Zhang-O/small | tensor__cpu/http/spyser_liyou.py | 1 | 5473 | import urllib.request
from bs4 import BeautifulSoup
import re
import urllib.parse
import xlsxwriter
import pandas as pd
import numpy as np
from urllib import request, parse
from urllib.error import URLError
import json
import multiprocessing
import time
# the detail-page URLs are collected here
urls_of_detail = []
total_pages = 0
# the fields to scrape, stored in order as lists
_1 = []
_2 = []
_3 = []
_4 = []
_5 = []
issue_date_sum = []
project_address_sum = []
project_sector_sum = []
project_content_sum = []
company_name_sum = []
company_staff_sum = []
company_phone_sum = []
# top-level (listing) URL
url = 'http://www.stc.gov.cn/ZWGK/TZGG/GGSB/'
# 'page' is the page index (0 is the first page)
def get_urls(url,page):
# build the POST form data
# postdata = urllib.parse.urlencode({'currDistrict': '', 'pageNo': page,'hpjgName_hidden':'','keyWordName':''})
# postdata = postdata.encode('utf-8')
#
# send the request
# response = urllib.request.urlopen(url, data=postdata)
# html_cont = response.read()
if page == 0:
url = url + 'index.htm'
else:
url = url + 'index_' + str(page) + '.htm'
req = request.Request(url=url)
res_data = request.urlopen(req)
# print(res_data)
html_cont = res_data.read()
# parse the document tree
soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
#
# # use a regex to locate the elements that contain the second-level URLs
trs = soup.find_all('a', href=re.compile(r"^./201"))
# # store the second-level URLs in urls_of_detail
for i in trs:
# print(i['href'][2:])
urls_of_detail.append(i['href'][2:])
def get_info(url,second_url):
# s = urllib.request.urlopen(urls_of_detail[0])
# fetch the document
second_url = url + second_url
s = urllib.request.urlopen(second_url)
# parse the document
soup = BeautifulSoup(s, 'html.parser', from_encoding='utf-8')
# the target content sits inside td elements with no unique identifier; find all td and note the index of each wanted field in the list
div = soup.find_all('div', class_=re.compile(r"TRS_Editor"))
trs = div[0].find_all('tr')
trs = trs[1:]
# print(trs[0])
print('trs num',len(trs))
for tr in trs:
tds = tr.find_all('td')
if len(tds[0].find_all('font')) > 0 :
if tds[3].find_all('font')[0].string == None:
print(second_url)
_1.append(tds[0].find_all('font')[0].string)
_2.append(tds[1].find_all('font')[0].string)
_3.append(tds[2].find_all('font')[0].string)
_4.append(tds[3].find_all('font')[0].string)
if len(tds) == 5:
_5.append(tds[4].find_all('font')[0].string)
else:
_5.append('null')
elif len(tds[0].find_all('p')) > 0 :
# if tds[3].find_all('p')[0].string == None:
# print(second_url)
_1.append(tds[0].find_all('p')[0].string)
_2.append(tds[1].find_all('p')[0].string)
_3.append(tds[2].find_all('p')[0].string)
if len(tds[3].find_all('p')) > 0:
_4.append(tds[3].find_all('p')[0].string)
else:
_4.append(tds[3].string)
if len(tds) == 5:
_5.append(tds[4])
else:
_5.append('null')
else:
if tds[3].string == None:
print(second_url)
_1.append(tds[0].string)
_2.append(tds[1].string)
if len(tds[2].find_all('span'))>0 and tds[2].find_all('span')[0].string == None:
_3.append(tds[2].string)
else:
_3.append(tds[2].string)
_4.append(tds[3].string)
if len(tds) == 5:
_5.append(tds[4].string)
else:
_5.append('null')
# elif len(tds[0].find_all('td'))
# print(len(tds))
# print(tds[0].string)
# print(tds[1].string)
# print(tds[2].string)
# print(tds[3].string)
# print(response.read().decode('utf-8','ignore'))
# the site reports 1036 pages in total
num0 =0
for page in range(0,7):
num0 += 1
# print(num0)
get_urls(url, page)
# dump all second-level URLs to a text file
with open('urls_all_liyou','w') as f:
f.write(str(urls_of_detail))
# print(len(urls_of_detail))
# print(len(set(urls_of_detail)))
print('urls num :' , len(urls_of_detail))
num = 0  # mainly for debugging: if the crawl fails, this shows which URL it failed on
for second_url in urls_of_detail:
num += 1
print('page num : ', num)
if num in [15,42]:
continue
if num > 54:
break
get_info(url, second_url)
print('end ----------')
print(len(_1))
workbook = xlsxwriter.Workbook('./liyou.xlsx')
# 1. ------------------ create a worksheet to hold the detailed records -------------------------------
ws = workbook.add_worksheet('liyou')
# set column widths
ws.set_column('A:A', 25)
ws.set_column('B:B', 25)
ws.set_column('C:C', 15)
ws.set_column('D:D', 15)
ws.set_column('E:E', 15)
# write the header row
ws.write(0, 0, '序号')
ws.write(0, 1, '区域')
ws.write(0, 2, '类型')
ws.write(0, 3, '设置地点')
ws.write(0, 4, '方向')
number = len(_1)
for i in range(number):
ws.write(i + 1, 0, str(_1[i]))
ws.write(i + 1, 1, str(_2[i]))
ws.write(i + 1, 2, str(_3[i]))
ws.write(i + 1, 3, str(_4[i]))
ws.write(i + 1, 4, str(_5[i]))
workbook.close()
| mit |
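Since pandas is imported (as pd) but never used in the scraper above, the same five columns could also be written in a couple of lines; a sketch only, with an illustrative output file name and the header strings matching the cells written above:

columns = ['序号', '区域', '类型', '设置地点', '方向']
df = pd.DataFrame(dict(zip(columns, [_1, _2, _3, _4, _5])))
df.to_excel('./liyou_pandas.xlsx', sheet_name='liyou', columns=columns, index=False)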
harisbal/pandas | pandas/tests/test_panel.py | 1 | 95658 | # -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from warnings import catch_warnings, simplefilter
from datetime import datetime
import operator
import pytest
import numpy as np
from pandas.core.dtypes.common import is_float_dtype
from pandas import (Series, DataFrame, Index, date_range, isna, notna,
MultiIndex)
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.io.formats.printing import pprint_thing
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas.tseries.offsets import BDay, MonthEnd
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
ensure_clean, makeMixedDataFrame,
makeCustomDataframe as mkdf)
import pandas.core.panel as panelm
import pandas.util.testing as tm
import pandas.util._test_decorators as td
def make_test_panel():
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
_panel = tm.makePanel()
tm.add_nans(_panel)
_panel = _panel.copy()
return _panel
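# Panel is deprecated (hence the FutureWarning filters throughout this module);
# the usual migration is a DataFrame with a (major, minor) MultiIndex, which
# the to_frame()/to_panel() round trip exercised further down already gives.
# A minimal sketch, using the helper above:
#
#     panel = make_test_panel()
#     long_df = panel.to_frame()     # items become columns, (major, minor) the index
#     restored = long_df.to_panel()  # back to a Panel while the class still exists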
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = tm.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
pytest.raises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel)
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
assert getattr(self.panel, attr).name is None
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
@td.skip_if_no("numpy", min_version="1.10.0")
def test_prod(self):
self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod)
@pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning")
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
@pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning")
def test_min(self):
self._check_stat_op('min', np.min)
@pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning")
def test_max(self):
self._check_stat_op('max', np.max)
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True,
skipna_alternative=None):
if obj is None:
obj = self.panel
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if name in ['sum', 'prod']:
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
pytest.raises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
tm.assert_raises_regex(NotImplementedError, name, f,
numeric_only=True)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
assert 'ItemA' not in self.panel._item_cache
assert self.panel.items is new_items
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
assert self.panel[0].index is new_major
assert self.panel.major_axis is new_major
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
assert self.panel[0].columns is new_minor
assert self.panel.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel._get_axis_number('items') == 0
assert self.panel._get_axis_number('major') == 1
assert self.panel._get_axis_number('minor') == 2
with tm.assert_raises_regex(ValueError, "No axis named foo"):
self.panel._get_axis_number('foo')
with tm.assert_raises_regex(ValueError, "No axis named foo"):
self.panel.__ge__(self.panel, axis='foo')
def test_get_axis_name(self):
assert self.panel._get_axis_name(0) == 'items'
assert self.panel._get_axis_name(1) == 'major_axis'
assert self.panel._get_axis_name(2) == 'minor_axis'
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
pytest.raises(Exception, self.panel.__add__,
self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(); just check that iteration works
for k, v in self.panel.iteritems():
pass
assert len(list(self.panel.iteritems())) == len(self.panel.items)
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(
result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod']
if not compat.PY3:
ops.append('div')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
pprint_thing("Failing operation: %r" % 'div')
raise
def test_combinePanel(self):
result = self.panel.add(self.panel)
assert_panel_equal(result, self.panel * 2)
def test_neg(self):
assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
def test_select(self):
p = self.panel
# select items
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
assert_panel_equal(result, expected)
# select major_axis
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = p.select(lambda x: x >= datetime(
2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
assert_panel_equal(result, expected)
# select minor_axis
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
# corner case, empty thing
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = p.select(lambda x: x in ('foo', ), axis='items')
assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
assert_panel_equal(result, expected)
assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
assert result.name == 'A'
assert result2.name == 'A'
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
assert 'ItemA' not in self.panel.items
del self.panel['ItemB']
assert 'ItemB' not in self.panel.items
pytest.raises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with pytest.raises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(
index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
assert self.panel['ItemG'].values.dtype == np.int64
assert self.panel['ItemE'].values.dtype == np.bool_
# object dtype
self.panel['ItemQ'] = 'foo'
assert self.panel['ItemQ'].values.dtype == np.object_
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
assert self.panel['ItemP'].values.dtype == np.bool_
pytest.raises(TypeError, self.panel.__setitem__, 'foo',
self.panel.loc[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assert_raises_regex(ValueError,
r"shape of value must be "
r"\(3, 2\), shape of given "
r"object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
assert result.name == 'ItemA'
# not contained
idx = self.panel.major_axis[0] - BDay()
pytest.raises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
assert xs['ItemA'].dtype == np.float64
assert xs['ItemD'].dtype == np.object_
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
pytest.raises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
assert xs['ItemA'].dtype == np.float64
assert xs['ItemD'].dtype == np.object_
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
tm.assert_frame_equal(itemA, expected)
# Get a view by default.
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
assert np.isnan(self.panel['ItemA'].values).all()
# Mixed-type yields a copy.
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
assert result._is_copy is not None
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
with catch_warnings():
simplefilter("ignore", FutureWarning)
# XXX: warning in _validate_read_indexer
assert_panel_equal(p.loc[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.loc[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.loc[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.loc[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.loc[items, :, :], p.reindex(items=items))
assert_panel_equal(p.loc[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.loc[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.iloc[:, -1, :]
expected = p.loc[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.loc[item], p[item])
assert_frame_equal(p.loc[item, :], p[item])
assert_frame_equal(p.loc[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.loc[:, date], p.major_xs(date))
assert_frame_equal(p.loc[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.loc[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.loc[item, date], p[item].loc[date])
assert_series_equal(p.loc[item, date, :], p[item].loc[date])
assert_series_equal(p.loc[item, :, col], p[item][col])
assert_series_equal(p.loc[:, date, col], p.major_xs(date).loc[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_getitem_callable(self):
p = self.panel
# GH 12533
assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB'])
assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']],
p.loc[['ItemB', 'ItemC']])
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.iloc[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, [0, 1, 3, 5], -2:] = df
out = p.iloc[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.loc[indexer]
obj.values[:] = 0
assert (obj.values == 0).all()
comp(cp.loc[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
tm.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
pytest.raises(Exception, func, p1, tp)
# versus different objs
pytest.raises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
with np.errstate(invalid='ignore'):
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with catch_warnings():
simplefilter("ignore", FutureWarning)
with tm.assert_raises_regex(TypeError,
"There must be an argument "
"for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.panel.set_value(item, mjr, mnr, 1.)
tm.assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
with catch_warnings():
simplefilter("ignore", FutureWarning)
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
assert isinstance(res, Panel)
assert res is not self.panel
assert res.get_value('ItemE', 'foo', 'bar') == 1.5
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['ItemE'].values)
msg = ("There must be an argument for each "
"axis plus the value provided")
with tm.assert_raises_regex(TypeError, msg):
self.panel.set_value('a')
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
def setup_method(self, method):
self.panel = make_test_panel()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
assert wp._data is self.panel._data
wp = Panel(self.panel._data, copy=True)
assert wp._data is not self.panel._data
tm.assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
assert wp.values.dtype == np.object_
vals = self.panel.values
# no copy
wp = Panel(vals)
assert wp.values is vals
# copy
wp = Panel(vals, copy=True)
assert wp.values is not vals
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
tm.assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3),
minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
tm.assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assert_raises_regex(ValueError, "The number of dimensions required is 3"): # noqa
Panel(np.random.randn(10, 2))
def test_consolidate(self):
assert self.panel._data.is_consolidated()
self.panel['foo'] = 1.
assert not self.panel._data.is_consolidated()
panel = self.panel._consolidate()
assert panel._data.is_consolidated()
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
tm.assert_index_equal(wp.major_axis, self.panel.major_axis)
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
tm.assert_index_equal(wp.major_axis, itemb.index[5:])
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = {k: v.reindex(wp.major_axis).fillna(0)
for k, v in compat.iteritems(d)}
result = Panel(dcasted, dtype=int)
expected = Panel({k: v.astype(int)
for k, v in compat.iteritems(dcasted)})
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel({k: v.astype(np.int32)
for k, v in compat.iteritems(dcasted)})
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = {k: v.values for k, v in self.panel.iteritems()}
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
tm.assert_index_equal(result.major_axis, exp_major)
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
pytest.raises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
pytest.raises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items,
major_axis=major, minor_axis=minor)
expected = self.panel.reindex(
items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
tm.assert_index_equal(conformed.index, self.panel.major_axis)
tm.assert_index_equal(conformed.columns, self.panel.minor_axis)
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_astype(self):
# GH7271
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
str_data = np.array([[['1', '2'], ['3', '4']],
[['5', '6'], ['7', '8']]])
expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
assert_panel_equal(panel.astype(str), expected)
pytest.raises(NotImplementedError, panel.astype, {0: str})
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
with np.errstate(invalid='ignore'):
expected = np.sqrt(self.panel.values)
assert_almost_equal(applied.values, expected)
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'),
index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'),
index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'),
index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(
lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel({ax: f(self.panel.loc[:, :, ax])
for ax in self.panel.minor_axis})
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel({ax: f(self.panel.loc[ax])
for ax in self.panel.items})
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel({ax: f(self.panel.loc[:, ax])
for ax in self.panel.major_axis})
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on Windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
pytest.raises(Exception, self.panel.reindex,
major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# raise exception when both minor and minor_axis are passed
pytest.raises(Exception, self.panel.reindex,
minor_axis=new_minor,
minor=new_minor)
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
assert result is not self.panel
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(
major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
assert result is self.panel
def test_reindex_axis_style(self):
panel = Panel(np.random.rand(5, 5, 5))
expected0 = Panel(panel.values).iloc[[0, 1]]
expected1 = Panel(panel.values).iloc[:, [0, 1]]
expected2 = Panel(panel.values).iloc[:, :, [0, 1]]
result = panel.reindex([0, 1], axis=0)
assert_panel_equal(result, expected0)
result = panel.reindex([0, 1], axis=1)
assert_panel_equal(result, expected1)
result = panel.reindex([0, 1], axis=2)
assert_panel_equal(result, expected2)
result = panel.reindex([0, 1], axis=2)
assert_panel_equal(result, expected2)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(
items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert result.items is self.panel.items
assert result.major_axis is self.panel.major_axis
assert result.minor_axis is self.panel.minor_axis
result = self.panel.reindex(
items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# neg indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
pytest.raises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(
sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
assert np.isfinite(filled.values).all()
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
pytest.raises(ValueError, self.panel.fillna)
pytest.raises(ValueError, self.panel.fillna, 5, method='ffill')
pytest.raises(TypeError, self.panel.fillna, [1, 2])
pytest.raises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
pytest.raises(NotImplementedError,
lambda: p.fillna(999, limit=1))
# Test in-place fillna
# Expected result
expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
# method='ffill'
p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
p1.fillna(method='ffill', inplace=True)
assert_panel_equal(p1, expected)
# method='bfill'
p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
p2.fillna(method='bfill', inplace=True)
assert_panel_equal(p2, expected)
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
assert result.items is self.panel.minor_axis
result = self.panel.swapaxes('items', 'major')
assert result.items is self.panel.major_axis
result = self.panel.swapaxes('major', 'minor')
assert result.major_axis is self.panel.minor_axis
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
assert result.items is self.panel.major_axis
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
assert id(self.panel) != id(result)
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose(
'minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor',
'major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assert_raises_regex(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assert_raises_regex(ValueError,
'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
pytest.raises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
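        # with copy=True, mutating the original panel afterwards must not
        # be reflected in the already-transposed result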
panel.values[0, 1, 1] = np.nan
assert notna(result.values[1, 0, 1])
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
assert unfiltered.index.names == ('major', 'minor')
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
assert rdf.index.names == df.index.names
assert rdf.columns.names == df.columns.names
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
assert wp['bool'].values.dtype == np.bool_
# Previously, this was mutating the underlying
# index and changing its name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples(
[(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
[3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'],
['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], [
'y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4],
[-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples(
[(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
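        # (major=1, minor=0) and (major=1, minor=1) never appear in the
        # original index, so the pivoted values there should be NaN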
assert isna(panel[0].loc[1, [0, 1]]).all()
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
tm.assert_raises_regex(
ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx),
shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx),
shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel({i: f.shift(-1)[:-1]
for i, f in self.panel.iteritems()})
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame())
for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
assert_panel_equal(shifted, shifted3)
tm.assert_raises_regex(ValueError, 'does not match',
ps.tshift, freq='M')
# DatetimeIndex
panel = make_test_panel()
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.iloc[:, [0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
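        # along the items axis each frame is compared with the previous item:
        # i2 is measured relative to i1, and i3 relative to i2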
result = wp.pct_change(axis='items')
expected = Panel(
{'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel(
{'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
assert_panel_equal(expected, result)
def test_numpy_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = np.round(p)
assert_panel_equal(expected, result)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.round, p, out=p)
    # Panel is being removed before NumPy starts enforcing this, so just
    # ignore the warning
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_multiindex_get(self):
ind = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.loc['a']
assert_panel_equal(f1, f2)
assert (f1.items == [1, 2]).all()
assert (f2.items == [1, 2]).all()
MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
assert (f1.items == [1, 2]).all()
f1 = wp[('b', 1)]
assert (f1.columns == ['A', 'B', 'C', 'D']).all()
def test_repr_empty(self):
empty = Panel()
repr(empty)
    # ignore our own FutureWarning, since Panel is being removed
@pytest.mark.filterwarnings("ignore:Using:FutureWarning")
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename(items=mapper)
exp = Index(['foo', 'bar', 'baz'])
tm.assert_index_equal(renamed.items, exp)
renamed = self.panel.rename(minor_axis=str.lower)
exp = Index(['a', 'b', 'c', 'd'])
tm.assert_index_equal(renamed.minor_axis, exp)
# don't copy
renamed_nocopy = self.panel.rename(items=mapper, copy=False)
renamed_nocopy['foo'] = 3.
assert (self.panel['ItemA'].values == 3).all()
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
pytest.skip("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
with ensure_clean('__tmp__.' + ext) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
pytest.skip("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
pytest.skip("Requires xlrd and xlsxwriter. Skipping test.")
with ensure_clean('__tmp__.xlsx') as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
pytest.skip("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
@pytest.mark.filterwarnings("ignore:'.reindex:FutureWarning")
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.loc[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.loc[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.loc[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.loc[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.loc[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.loc[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.loc['b'] = np.nan
result = p.dropna(how='all')
exp = p.loc[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
pytest.raises(KeyError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
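        # `other` only carries item 1, so the update touches just the second
        # item of `pan`, and NaNs in `other` leave the existing values alone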
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[3.6, 2., 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])})
other = {'two': DataFrame(
[[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel(
{'one': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'two': DataFrame([[3.6, 2., 3],
[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])
}
)
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, 2., 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, np.nan, 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
pytest.raises(Exception, pan.update, *(pan, ),
**{'raise_conflict': True})
def test_all_any(self):
assert (self.panel.all(axis=0).values == nanall(
self.panel, axis=0)).all()
assert (self.panel.all(axis=1).values == nanall(
self.panel, axis=1).T).all()
assert (self.panel.all(axis=2).values == nanall(
self.panel, axis=2).T).all()
assert (self.panel.any(axis=0).values == nanany(
self.panel, axis=0)).all()
assert (self.panel.any(axis=1).values == nanany(
self.panel, axis=1).T).all()
assert (self.panel.any(axis=2).values == nanany(
self.panel, axis=2).T).all()
def test_all_any_unhandled(self):
pytest.raises(NotImplementedError, self.panel.all, bool_only=True)
pytest.raises(NotImplementedError, self.panel.any, bool_only=True)
# GH issue 15960
def test_sort_values(self):
pytest.raises(NotImplementedError, self.panel.sort_values)
pytest.raises(NotImplementedError, self.panel.sort_values, 'ItemA')
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanelFrame(object):
"""
    Check that conversions between Panel and DataFrame work.
"""
def setup_method(self, method):
panel = make_test_panel()
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index),
lp2['ItemA'],
check_names=False)
def test_ops_scalar(self):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.iloc[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
ops = ['add', 'sub', 'mul', 'div',
'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
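            # a level is sorted when its label codes are monotonically
            # non-decreasing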
            return (arr[1:] >= arr[:-1]).all()
sorted_minor = self.panel.sort_index(level=1)
assert is_sorted(sorted_minor.index.labels[1])
sorted_major = sorted_minor.sort_index(level=0)
assert is_sorted(sorted_major.index.labels[0])
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
def test_to_sparse(self):
if isinstance(self.panel, Panel):
msg = 'sparsifying is not supported'
tm.assert_raises_regex(NotImplementedError, msg,
self.panel.to_sparse)
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
pytest.raises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
assert len(minor_dummies.columns) == len(self.panel.index.levels[1])
major_dummies = make_axis_dummies(self.panel, 'major').astype(np.uint8)
assert len(major_dummies.columns) == len(self.panel.index.levels[0])
mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get).astype(np.uint8)
assert len(transformed.columns) == 2
tm.assert_index_equal(transformed.columns, Index(['one', 'two']))
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
dummies = get_dummies(self.panel['Label'])
tm.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
means = self.panel.mean(level='minor')
# test versus Panel version
wide_means = self.panel.to_panel().mean('major')
assert_frame_equal(means, wide_means)
def test_sum(self):
sums = self.panel.sum(level='minor')
# test versus Panel version
wide_sums = self.panel.to_panel().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
assert major_count[i] == (labels == i).sum()
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
assert minor_count[i] == (labels == i).sum()
def test_join(self):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
assert len(joined.columns) == 3
pytest.raises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
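    # the time values cycle fastest while each panel id is repeated once per
    # time point, matching the tile/repeat construction below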
expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
np.repeat([1, 2, 3], 4)],
names=['time', 'panel'])
tm.assert_index_equal(index, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_panel_np_all():
wp = Panel({"A": DataFrame({'b': [1, 2]})})
result = np.all(wp)
assert result == np.bool_(True)
| bsd-3-clause |
RomainBrault/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
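# the first 10 points are shifted towards (1, 1) and labeled +1 below,
# while the last 10 stay centered at the origin and are labeled -1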
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/io/json/test_normalize.py | 6 | 16358 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
@pytest.fixture
def state_data():
return [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
@pytest.fixture
def author_missing_data():
return [
{'info': None},
{'info':
{'created_at': '11/08/1993', 'last_updated': '26/05/2012'},
'author_name':
{'first': 'Jane', 'last_name': 'Doe'}
}]
class TestJSONNormalize(object):
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties', meta='state')
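        # the 'state' meta value is broadcast across that state's counties:
        # three rows for Florida and two for Ohio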
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({'A': {'A': 1, 'B': 2}})
expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']],
sep='_')
expected = Index(['name', 'pop',
'country', 'states_name']).sort_values()
assert result.columns.sort_values().equals(expected)
def test_value_array_record_prefix(self):
# GH 21536
result = json_normalize({'A': [1, 2]}, 'A', record_prefix='Prefix.')
expected = DataFrame([[1], [2]], columns=['Prefix.0'])
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
with pytest.raises(ValueError):
json_normalize(data, 'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
def test_meta_parameter_not_modified(self):
# GH 18610
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
COLUMNS = ['foo', 'bar']
result = json_normalize(data, 'data', meta=COLUMNS,
meta_prefix='meta')
assert COLUMNS == ['foo', 'bar']
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
if compat.PY3:
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode('utf8')
else:
testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
testdata = {
u'sub.A': [1, 3],
u'sub.B': [2, 4],
b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
def test_missing_field(self, author_missing_data):
# GH20030:
result = json_normalize(author_missing_data)
ex_data = [
{'info': np.nan,
'author_name.first': np.nan,
'author_name.last_name': np.nan,
'info.created_at': np.nan,
'info.last_updated': np.nan},
{'info': None,
'author_name.first': 'Jane',
'author_name.last_name': 'Doe',
'info.created_at': '11/08/1993',
'info.last_updated': '26/05/2012'}
]
expected = DataFrame(ex_data)
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
assert result == expected
def test_json_normalize_errors(self):
# GH14583: If meta keys are not always present
# a new option to set errors='ignore' has been implemented
i = {
"Trades": [{
"general": {
"tradeid": 100,
"trade_version": 1,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}, {
"general": {
"tradeid": 100,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}
]
}
j = json_normalize(data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='ignore')
expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
'price': {0: '0', 1: '0', 2: '0', 3: '0'},
'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
assert j.fillna('').to_dict() == expected
pytest.raises(KeyError,
json_normalize, data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise'
)
def test_donot_drop_nonevalues(self):
# GH21356
data = [
{'info': None,
'author_name':
{'first': 'Smith', 'last_name': 'Appleseed'}
},
{'info':
{'created_at': '11/08/1993', 'last_updated': '26/05/2012'},
'author_name':
{'first': 'Jane', 'last_name': 'Doe'}
}
]
result = nested_to_record(data)
expected = [
{'info': None,
'author_name.first': 'Smith',
'author_name.last_name': 'Appleseed'},
{'author_name.first': 'Jane',
'author_name.last_name': 'Doe',
'info.created_at': '11/08/1993',
'info.last_updated': '26/05/2012'}]
assert result == expected
def test_nonetype_top_level_bottom_level(self):
# GH21158: If inner level json has a key with a null value
        # make sure it doesn't call new_d.pop twice and raise an exception
data = {
"id": None,
"location": {
"country": {
"state": {
"id": None,
"town.info": {
"id": None,
"region": None,
"x": 49.151580810546875,
"y": -33.148521423339844,
"z": 27.572303771972656}}}
}
}
result = nested_to_record(data)
expected = {
'id': None,
'location.country.state.id': None,
'location.country.state.town.info.id': None,
'location.country.state.town.info.region': None,
'location.country.state.town.info.x': 49.151580810546875,
'location.country.state.town.info.y': -33.148521423339844,
'location.country.state.town.info.z': 27.572303771972656}
assert result == expected
def test_nonetype_multiple_levels(self):
# GH21158: If inner level json has a key with a null value
        # make sure it doesn't call new_d.pop twice and raise an exception
data = {
"id": None,
"location": {
"id": None,
"country": {
"id": None,
"state": {
"id": None,
"town.info": {
"region": None,
"x": 49.151580810546875,
"y": -33.148521423339844,
"z": 27.572303771972656}}}
}
}
result = nested_to_record(data)
expected = {
'id': None,
'location.id': None,
'location.country.id': None,
'location.country.state.id': None,
'location.country.state.town.info.region': None,
'location.country.state.town.info.x': 49.151580810546875,
'location.country.state.town.info.y': -33.148521423339844,
'location.country.state.town.info.z': 27.572303771972656}
assert result == expected
| bsd-3-clause |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
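    # A minimal usage sketch (an illustrative assumption, not part of the
    # original source; assumes a typical pyplot session):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.plot([0, 1, 2], [0, 1, 4], label='quadratic')
    #     ax.legend(loc='upper left')   # equivalent to loc=2
    #     plt.show()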
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
                 fancybox=None, # True: draw a rounded fancy box, False: draw a square box, None: use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
            # use the deprecated value if it is not None and its newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
            raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
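        # tile the offsets so there are enough for every legend point, then
        # trim the sequence to exactly `scatterpoints` entries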
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
    def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
        "Helper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
    def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
        "Helper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
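            # when loc is a location code, anchor a box of the legend's
            # size inside the parent bbox according to that code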
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == "expand", set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
        handle_list = [] # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (eg, Collections), you need to
        # manually set its transform to self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
                # attribute. But other handle types do not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
        Update the dimension of the legend_box. This is required
        because the paddings, the handle size, etc. depend on the dpi
        of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
        Returns the vertices, bounding boxes and line paths covered by the
        plot, as a three-element list. The first element is a list of (x, y)
        vertices (in display coordinates) covered by the lines and line
        collections in the legend's handles. The second element is a list of
        bounding boxes for all the patches in the legend's handles. The third
        element is a list of the transformed paths of the axes' lines.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, in display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
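    @staticmethod
    def _demo_anchored_bbox_sketch():
        # A minimal usage sketch (the _demo_* helper is hypothetical, not part
        # of the original class): anchoring a small box inside a padded parent
        # box with the same compass-point codes ("NE", "SW", ...) used above.
        from matplotlib.transforms import Bbox
        parent = Bbox.from_bounds(0, 0, 100, 100)
        small = Bbox.from_bounds(0, 0, 20, 10)
        anchored = small.anchored("NE", container=parent.padded(-5))
        return anchored.x0, anchored.y0  # lower-left corner of the placed box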
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave it as is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
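    @staticmethod
    def _demo_badness_score_sketch():
        # A minimal sketch with made-up geometry (the _demo_* helper is
        # hypothetical, not part of the original class): how the badness score
        # above penalizes a candidate legend box that covers vertices,
        # overlaps patch bboxes, or is crossed by a line path.
        from matplotlib.path import Path
        from matplotlib.transforms import Bbox
        candidate = Bbox.from_bounds(0, 0, 10, 10)
        verts = np.array([[1.0, 1.0], [20.0, 20.0]])       # one vertex inside
        patch_bboxes = [Bbox.from_bounds(5, 5, 2, 2)]      # one overlapping box
        line = Path(np.array([[0.0, 0.0], [30.0, 30.0]]))  # crosses candidate
        badness = (candidate.count_contains(verts) +
                   candidate.count_overlaps(patch_bboxes) +
                   int(line.intersects_bbox(candidate)))
        return badness  # -> 3 for this geometry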
| agpl-3.0 |
UKPLab/semeval2017-scienceie | code/convNet.py | 1 | 7292 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from extras import VSM, read_and_map
from representation import VeryStupidCBOWMapper, CharMapper
import sys, numpy as np,os
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from keras.layers import Convolution1D, GlobalMaxPooling1D, Lambda, Merge
from keras.preprocessing import sequence
from keras import backend as K
maxlen=50
maxlen=100
maxlen=150
maxlen=50+2*30
try:
L = int(sys.argv[5])
M = int(sys.argv[6])
R = int(sys.argv[7])
except IndexError:
L = 30
M = 50
R = 30
maxlen=L+M+R
# this is a simple cnn
# if you wanted to use it below, you would have to do
# X_train = X_train.reshape(len(X_train),input_shape[0],input_shape[1])
def build_cnn(input_shape, output_dim,nb_filter):
clf = Sequential()
clf.add(Convolution1D(nb_filter=nb_filter,
filter_length=4,border_mode="valid",activation="relu",subsample_length=1,input_shape=input_shape))
clf.add(GlobalMaxPooling1D())
clf.add(Dense(100))
clf.add(Dropout(0.2))
clf.add(Activation("tanh"))
clf.add(Dense(output_dim=output_dim, activation='softmax'))
clf.compile(optimizer='adagrad',
loss='categorical_crossentropy',
metrics=['accuracy'])
return clf
# just one filter
def build_cnn_char(input_dim, output_dim,nb_filter):
clf = Sequential()
clf.add(Embedding(input_dim,
32, # character embedding size
input_length=maxlen,
dropout=0.2))
clf.add(Convolution1D(nb_filter=nb_filter,
filter_length=3,border_mode="valid",activation="relu",subsample_length=1))
clf.add(GlobalMaxPooling1D())
clf.add(Dense(100))
clf.add(Dropout(0.2))
clf.add(Activation("tanh"))
clf.add(Dense(output_dim=output_dim, activation='softmax'))
clf.compile(optimizer='adagrad',
loss='categorical_crossentropy',
metrics=['accuracy'])
return clf
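# A minimal usage sketch (not part of the original training script; all sizes
# below are arbitrary demo values and assume the Keras 1.x API imported above):
# exercising build_cnn_char on random integer sequences of length `maxlen`.
def _demo_build_cnn_char():
    n_samples, vocab_size, n_classes = 32, 100, 5
    X_demo = np.random.randint(1, vocab_size, size=(n_samples, maxlen))
    y_demo = to_categorical(np.random.randint(0, n_classes, size=(n_samples,)),
                            n_classes)
    clf = build_cnn_char(vocab_size + 1, n_classes, nb_filter=16)
    clf.fit(X_demo, y_demo, nb_epoch=1, verbose=0)
    return clf.predict_classes(X_demo, verbose=0)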
# just one filter
def build_cnn_char_threeModels(input_dim, output_dim,nb_filter,filter_size=3):
left = Sequential()
left.add(Embedding(input_dim,
32, # character embedding size
input_length=L,
dropout=0.2))
left.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
left.add(GlobalMaxPooling1D())
left.add(Dense(100))
left.add(Dropout(0.2))
left.add(Activation("tanh"))
center = Sequential()
center.add(Embedding(input_dim,
32, # character embedding size
input_length=M,
dropout=0.2))
center.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
center.add(GlobalMaxPooling1D())
center.add(Dense(100))
center.add(Dropout(0.2))
center.add(Activation("tanh"))
right = Sequential()
right.add(Embedding(input_dim,
32, # character embedding size
input_length=R,
dropout=0.2))
right.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
right.add(GlobalMaxPooling1D())
right.add(Dense(100))
right.add(Dropout(0.2))
right.add(Activation("tanh"))
clf = Sequential()
clf.add(Merge([left,center,right],mode="concat"))
clf.add(Dense(output_dim=output_dim, activation='softmax'))
clf.compile(optimizer='adagrad',
loss='categorical_crossentropy',
metrics=['accuracy'])
return clf
def max_1d(X):
return K.max(X,axis=1)
# multiple filters
def build_cnn_char_complex(input_dim, output_dim,nb_filter):
randomEmbeddingLayer = Embedding(input_dim,32, input_length=maxlen,dropout=0.1)
poolingLayer = Lambda(max_1d, output_shape=(nb_filter,))
conv_filters = []
for n_gram in range(2,4):
ngramModel = Sequential()
ngramModel.add(randomEmbeddingLayer)
ngramModel.add(Convolution1D(nb_filter=nb_filter,
filter_length=n_gram,
border_mode="valid",
activation="relu",
subsample_length=1))
ngramModel.add(poolingLayer)
conv_filters.append(ngramModel)
clf = Sequential()
clf.add(Merge(conv_filters,mode="concat"))
clf.add(Activation("relu"))
clf.add(Dense(100))
clf.add(Dropout(0.1))
clf.add(Activation("tanh"))
clf.add(Dense(output_dim=output_dim, activation='softmax'))
clf.compile(optimizer='adagrad',
loss='categorical_crossentropy',
metrics=['accuracy'])
return clf
def acc(correct, total):
return 1.0*correct/total
# example argline:
# python convNet.py ../scienceie2017_train/train2 ../scienceie2017_dev/dev ../resources/vsm/glove.6B/glove.6B.100d.txt
if __name__=="__main__":
train_src = sys.argv[1]
dev_src = sys.argv[2]
# vsm_path = sys.argv[3]
vsm_path = None
print("Loading VSM")
vsm = VSM(vsm_path)
    try:
        # context size from the command line (argv[4]); fall back to 2
        csize = int(sys.argv[4])
    except IndexError:
        csize = 2
try:
n_filter = int(sys.argv[8])
except IndexError:
n_filter = 250
try:
filter_size = int(sys.argv[9])
except IndexError:
filter_size = 3
if len(sys.argv)>10 and sys.argv[10]=="document":
SB = False
else:
SB = True
mapper = CharMapper(vsm,csize,L=L,M=M,R=R,sentence_boundaries=SB)
print("Reading training data")
X_train, y_train, y_values, _ = read_and_map(train_src, mapper)
X_dev, y_dev_gold, _, estrings = read_and_map(dev_src, mapper, y_values)
vocabSize = mapper.curVal
print(X_train.shape)
print(y_train.shape)
#sys.exit(1)
print("Trainig a model")
timesteps = 2*csize + 1 # left, right, center
context_dim = 100
input_shape = (timesteps,context_dim)
clf = build_cnn_char(vocabSize+1, len(y_values)+1,n_filter)
clf = build_cnn_char_threeModels(vocabSize+1, len(y_values)+1,n_filter)
X_left = X_train[:,:L]
X_center = X_train[:,L:L+M]
X_right = X_train[:,L+M:L+M+R]
print L,M,R,X_train.shape,X_left.shape,X_center.shape,X_right.shape,y_train,y_values
clf.fit([X_left,X_center,X_right], to_categorical(y_train, len(y_values)+1), verbose=1, nb_epoch=15)
print("Reading test data")
print("Testing")
X_dev_left = X_dev[:,:L]
X_dev_center = X_dev[:,L:L+M]
X_dev_right = X_dev[:,L+M:L+M+R]
print(X_dev.shape,X_dev_left.shape,X_dev_center.shape,X_dev_right.shape)
y_dev_auto = clf.predict_classes([X_dev_left,X_dev_center,X_dev_right]) # for LogisticRegression just do predict()
print "==PREDICTING=="
for i in xrange(len(y_dev_auto)):
print y_values[y_dev_auto[i]]
| apache-2.0 |
ran5515/DeepDecision | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
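# A minimal sketch (hypothetical in-memory data, not the tutorial CSVs):
# get_input_fn only needs a DataFrame carrying the FEATURES and LABEL columns,
# so a hand-built frame is enough for a quick smoke test.
def demo_input_fn():
  demo_frame = pd.DataFrame({col: [0.1, 0.2, 0.3] for col in COLUMNS})
  # One ordered pass over the data, suitable for evaluate() or predict().
  return get_input_fn(demo_frame, num_epochs=1, shuffle=False)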
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/util/testing.py | 3 | 92623 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
StringIO, PY3
)
from pandas.core.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel, Panel4D)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
try:
import pytest
slow = pytest.mark.slow
except ImportError:
# Should be ok to just ignore. If you actually need
# slow then you'll hit an import error long before getting here.
pass
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
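def _demo_round_trip_pickle():
    # A minimal usage sketch (the _demo_* helper is hypothetical, not part of
    # the public API): round-trip a small DataFrame through pickle and check
    # that it survives unchanged.
    df = DataFrame({'a': [1, 2, 3], 'b': list('xyz')})
    result = round_trip_pickle(df)
    assert_frame_equal(result, df)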
def round_trip_pathlib(writer, reader, path=None):
"""
    Write an object to a file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
    Write an object to a file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
    check_exact : bool, default False
        Whether to compare numbers exactly.
    check_dtype : bool / string {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{0} Expected type {1}, found {2} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
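def _demo_random_strings():
    # A minimal sketch (hypothetical helper): the rands*/randu* generators are
    # convenient for building throwaway labels in test fixtures.
    labels = rands_array(nchars=5, size=3)   # ndarray of three 5-char strings
    single = rands(8)                        # one 8-character string
    return labels, single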
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import pytest
if is_platform_32bit():
pytest.skip("skipping for 32 bit")
def _skip_module_if_no_mpl():
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg", warn=False)
def _skip_if_no_mpl():
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import pytest
pytest.skip("matplotlib not installed")
def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
mpl.use("Agg", warn=False)
def _skip_if_no_scipy():
try:
import scipy.stats # noqa
except ImportError:
import pytest
pytest.skip("no scipy.stats module")
try:
import scipy.interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate missing')
try:
import scipy.sparse # noqa
except ImportError:
import pytest
pytest.skip('scipy.sparse missing')
def _check_if_lzma():
try:
return compat.import_lzma()
except ImportError:
return False
def _skip_if_no_lzma():
import pytest
return _check_if_lzma() or pytest.skip('need backports.lzma to run')
def _skip_if_no_xarray():
try:
import xarray
except ImportError:
import pytest
pytest.skip("xarray not installed")
v = xarray.__version__
if v < LooseVersion('0.7.0'):
import pytest
pytest.skip("xarray not version is too low: {0}".format(v))
def _skip_if_no_pytz():
try:
import pytz # noqa
except ImportError:
import pytest
pytest.skip("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil # noqa
except ImportError:
import pytest
pytest.skip("dateutil not installed")
def _skip_if_windows_python_3():
if PY3 and is_platform_windows():
import pytest
pytest.skip("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import pytest
pytest.skip("Running on Windows")
def _skip_if_no_pathlib():
try:
from pathlib import Path # noqa
except ImportError:
import pytest
pytest.skip("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath # noqa
except ImportError:
import pytest
pytest.skip("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and its >= 1.0
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum', 'prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
_NUMEXPR_INSTALLED)
if engine == 'numexpr':
if not _USE_NUMEXPR:
import pytest
pytest.skip("numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_not_us_locale():
import locale
lang, _ = locale.getlocale()
if lang != 'en_US':
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_no_mock():
try:
import mock # noqa
except ImportError:
try:
from unittest import mock # noqa
except ImportError:
import nose
raise nose.SkipTest("mock is not installed")
def _skip_if_no_ipython():
try:
import IPython # noqa
except ImportError:
import nose
raise nose.SkipTest("IPython not installed")
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
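def _demo_get_locales():
    # A minimal sketch (hypothetical helper): list the English-language locales
    # available on this system; returns None where ``locale -a`` is not
    # available (e.g. Windows).
    return get_locales(prefix='en')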
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
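def _demo_set_locale():
    # A minimal sketch (hypothetical helper; locale availability is platform
    # dependent): format a month name under a temporarily-set locale and fall
    # back gracefully when that locale is not installed.
    try:
        with set_locale('en_US.UTF-8'):
            return datetime(2000, 1, 1).strftime('%B')
    except locale.Error:
        return None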
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
    except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
    locales : list of str
        A list of locale strings to validate.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
    exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
raise_assert_detail(obj, '{0} levels are different'.format(obj),
'{0}, {1}'.format(left.nlevels, left),
'{0}, {1}'.format(right.nlevels, right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, '{0} length are different'.format(obj),
'{0}, {1}'.format(len(left), left),
'{0}, {1}'.format(len(right), right))
    # MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{0}]'.format(level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
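def _demo_assert_index_equal():
    # A minimal sketch (hypothetical helper): with exact='equiv', a RangeIndex
    # and an Int64Index holding the same values compare as equal.
    assert_index_equal(Index([0, 1, 2]), RangeIndex(0, 3), exact='equiv')
    assert_index_equal(Index(['a', 'b']), Index(['a', 'b']), check_names=True)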
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{0} classes are not equivalent'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{0} classes are different'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, '
'type encountered {0!r}')
assert isinstance(el, (plt.Axes, dict)), msg.format(
el.__class__.__name__)
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that integer dtype of the codes are the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{0}.categories'.format(obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{0}.codes'.format(obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{0}.categories'.format(obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{0}.values'.format(obj))
assert_attr_equal('ordered', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{0} are different
{1}
[left]: {2}
[right]: {3}""".format(obj, message, left, right)
if diff is not None:
msg = msg + "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "%r is not %r" % (left_base, right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "%r is %r" % (left_base, right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{0} shapes are different'
.format(obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
        Whether to compare numbers exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, 'Series length are different',
'{0}, {1}'.format(len(left), left.index),
'{0}, {1}'.format(len(right), right.index))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{0}'.format(obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = '[datetimelike_compat=True] {0} is not equal to {1}.'
raise AssertionError(msg.format(left.values, right.values))
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
l = pd.IntervalIndex(left)
r = pd.IntervalIndex(right)
assert_index_equal(l, r, obj='{0}.index'.format(obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{0}'.format(obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
        # ToDo: there are some tests where the rhs is a SparseDataFrame and
        # the lhs is a DataFrame. Should use assert_class_equal in the future.
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'({0}, {1})'.format(*left.shape),
'({0}, {1})'.format(*right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {0}]'.format(i))
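def _demo_assert_frame_equal():
    # A minimal sketch (hypothetical helper): identical frames pass, while a
    # frame with shifted values raises AssertionError as expected.
    left = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    assert_frame_equal(left, left.copy())
    try:
        assert_frame_equal(left, left + 1)
    except AssertionError:
        pass  # the values differ, so the comparison correctly fails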
def assert_panelnd_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
assert_func : function for comparing data
    check_names : bool, default False
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_index_equal(left.columns, right.columns,
obj='{0}.columns'.format(obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
def assert_sp_list_equal(left, right):
assert isinstance(left, pd.SparseList)
assert isinstance(right, pd.SparseList)
assert_sp_array_equal(left.to_array(), right.to_array())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, ("Expected object %r and "
"object %r to be different "
"objects, were same."
% (type(elem1), type(elem2)))
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name)
def makeIntervalIndex(k=10, name=None):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None):
return RangeIndex(0, k, 1, name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
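# Illustrative sketch: a minimal consumer of all_index_generator. The helper
# below is hypothetical (not used elsewhere); every yielded index should have
# the requested length.
def _demo_all_index_generator():
    for index in all_index_generator(k=5):
        assert len(index) == 5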
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
with warnings.catch_warnings(record=True):
d = dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper))
return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
    nlevels - number of levels (> 1 produces a multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
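# Illustrative sketch: a quick check of makeCustomIndex for a 2-level
# MultiIndex with the first level's labels duplicated twice. Default names
# are prefix + level number ('#0', '#1'). `_demo_make_custom_index` is a
# hypothetical helper and is not used elsewhere.
def _demo_make_custom_index():
    idx = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2], names=True)
    assert idx.nlevels == 2
    assert len(idx) == 6
    assert list(idx.names) == ['#0', '#1']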
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
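# Illustrative sketch: roughly (1 - density) of the cells produced by
# makeMissingDataframe are set to NaN; the exact positions depend on the
# seeded RNG. `_demo_missing_dataframe` is a hypothetical helper.
def _demo_missing_dataframe():
    df = makeMissingDataframe(density=0.9, random_state=42)
    assert df.isnull().values.any()
    assert df.shape == makeDataFrame().shape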
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checker when running tests.
#
# Copied this from nipy/nipype
# Copyright of respective developers, License: BSD-3
def skip_if_no_package(pkg_name, min_version=None, max_version=None,
app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
If the package check fails, the test is automatically skipped.
Parameters
----------
pkg_name : string
Name of the required package.
min_version : string, optional
Minimal version number for required package.
max_version : string, optional
Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
--------
    skip_if_no_package('numpy', '1.3')
"""
import pytest
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if min_version:
msg += ' with version >= %s' % (min_version,)
if max_version:
msg += ' with version < %s' % (max_version,)
try:
mod = __import__(pkg_name)
except ImportError:
mod = None
try:
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for %s' % pkg_name)
if min_version and checker(have_version) < checker(min_version):
pytest.skip(msg)
if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
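# Illustrative sketch: how a test might guard an optional dependency with
# skip_if_no_package. Assumes pytest is installed (it is imported by the
# helper above); the body only runs when numpy >= 1.7 is importable, otherwise
# the test is skipped. `_demo_skip_if_no_package` is a hypothetical test name.
def _demo_skip_if_no_package():
    skip_if_no_package('numpy', min_version='1.7')
    import numpy as np
    assert np.arange(3).sum() == 3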
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
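# Illustrative sketch: a toy decorator built with optional_args that can be
# applied either bare or with a keyword argument. All names below are
# hypothetical and not used elsewhere in this module.
def _demo_optional_args():
    @optional_args
    def tag(f, label='default'):
        f.label = label
        return f
    @tag
    def plain():
        pass
    @tag(label='custom')
    def custom():
        pass
    assert plain.label == 'default'
    assert custom.label == 'custom'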
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network will fail (rather than be skipped) if it's
    possible to make a network connection to the check URL (defaults to
    google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
      URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if not errno and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error %s" % e)
try:
e_str = traceback.format_exc(e)
except:
e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
skip("Skipping test because exception "
"message is known and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`.
This is a port of the `assertRaisesRegexp` function from unittest in
Python 2.7. However, with our migration to `pytest`, please refrain
from using this. Instead, use the following paradigm:
with pytest.raises(_exception) as exc_info:
func(*args, **kwargs)
exc_info.matches(reg_exp)
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
    raise a warning. Defaults to ``Warning``, the base class of all
    warnings (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {0} != {1}. "
"Warning message: {2}".format(
actual_warning.filename, caller.filename,
actual_warning.message))
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Restores
    the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
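# Illustrative sketch: the same seed inside RNGContext yields the same draw,
# and the global numpy RNG state is restored on exit. `_demo_rng_context` is
# a hypothetical helper name.
def _demo_rng_context():
    outer_state = np.random.get_state()
    with RNGContext(42):
        first = np.random.randn()
    with RNGContext(42):
        second = np.random.randn()
    assert first == second
    assert (np.random.get_state()[1] == outer_state[1]).all()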
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
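# Illustrative sketch: run a function on two threads via test_parallel and
# check that both threads executed; note that return values are discarded by
# the decorator. `_demo_test_parallel` is a hypothetical name.
def _demo_test_parallel():
    results = []
    @test_parallel(num_threads=2)
    def append_one():
        results.append(1)
    append_one()
    assert len(results) == 2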
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
    >>> C.attribute # the value is reset when the context manager exits
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
if is_platform_windows():
import pytest
pytest.skip("timezone setting not supported on windows")
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
| mit |
jaidevd/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
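# Illustrative sketch: what _scale_normalize returns on a small dense matrix.
# The scaling factors are 1/sqrt(row sums) and 1/sqrt(column sums), and the
# normalized matrix is their outer scaling of X. `_demo_scale_normalize` is a
# hypothetical helper, not part of the public API.
def _demo_scale_normalize():
    X = np.array([[1., 3.], [2., 4.]])
    an, row_diag, col_diag = _scale_normalize(X)
    assert np.allclose(row_diag, 1.0 / np.sqrt(X.sum(axis=1)))
    assert np.allclose(col_diag, 1.0 / np.sqrt(X.sum(axis=0)))
    assert np.allclose(an, row_diag[:, np.newaxis] * X * col_diag)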
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
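# Illustrative sketch: after bistochastic normalization of a strictly positive
# dense matrix, all row sums converge to one constant and all column sums to
# another. The tolerance below is an assumption, not a documented guarantee;
# `_demo_bistochastic_normalize` is a hypothetical helper.
def _demo_bistochastic_normalize():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 4) + 0.1  # strictly positive so the iteration converges
    X_scaled = _bistochastic_normalize(X)
    row_sums = X_scaled.sum(axis=1)
    col_sums = X_scaled.sum(axis=0)
    assert np.allclose(row_sums, row_sums[0], atol=1e-3)
    assert np.allclose(col_sums, col_sums[0], atol=1e-3)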
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
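# Illustrative usage sketch (hypothetical helper, not called by the library):
# recover planted biclusters on synthetic data. Assumes make_biclusters and
# consensus_score are available from sklearn.datasets / sklearn.metrics in
# this version of the codebase.
def _demo_spectral_coclustering():
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    data, rows, columns = make_biclusters(shape=(30, 30), n_clusters=3,
                                          noise=0.5, random_state=0)
    model = SpectralCoclustering(n_clusters=3, random_state=0)
    model.fit(data)
    # one boolean row/column membership vector per bicluster
    assert model.rows_.shape == (3, 30)
    assert model.columns_.shape == (3, 30)
    # similarity to the planted structure; typically close to 1 at low noise
    return consensus_score(model.biclusters_, (rows, columns))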
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
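# Illustrative usage sketch (hypothetical helper, not called by the library):
# fit the checkerboard model on synthetic data with 2 row clusters and 3
# column clusters. Assumes sklearn.datasets.make_checkerboard is available in
# this version of the codebase.
def _demo_spectral_biclustering():
    from sklearn.datasets import make_checkerboard
    data, rows, columns = make_checkerboard(shape=(30, 30), n_clusters=(2, 3),
                                            noise=0.5, random_state=0)
    model = SpectralBiclustering(n_clusters=(2, 3), random_state=0)
    model.fit(data)
    # one bicluster per (row cluster, column cluster) pair: 2 * 3 = 6
    assert model.rows_.shape == (6, 30)
    assert model.columns_.shape == (6, 30)
    # row_labels_ / column_labels_ give the partition of rows and columns
    return model.row_labels_, model.column_labels_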
| bsd-3-clause |
ephes/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
zhoulingjun/zipline | zipline/assets/assets.py | 8 | 34670 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Expected fields for an Asset's metadata
ASSET_FIELDS = [
'sid',
'asset_type',
'symbol',
'root_symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
'notice_date',
'expiration_date',
'contract_multiplier',
# The following fields are for compatibility with other systems
'file_name', # Used as symbol
'company_name', # Used as asset_name
'start_date_nano', # Used as start_date
'end_date_nano', # Used as end_date
]
# Expected fields for an Asset's metadata
ASSET_TABLE_FIELDS = [
'sid',
'symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
]
# Expected fields for an Asset's metadata
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
'root_symbol',
'notice_date',
'expiration_date',
'contract_multiplier',
]
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
def __init__(self,
metadata=None,
allow_sid_assignment=True,
fuzzy_char=None,
db_path=':memory:',
create_table=True):
self.fuzzy_char = fuzzy_char
# This flag controls if the AssetFinder is allowed to generate its own
# sids. If False, metadata that does not contain a sid will raise an
# exception when building assets.
self.allow_sid_assignment = allow_sid_assignment
if allow_sid_assignment:
self.end_date_to_assign = normalize_date(
pd.Timestamp('now', tz='UTC'))
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
self.cursor = self.conn.cursor()
# The AssetFinder also holds a nested-dict of all metadata for
# reference when building Assets
self.metadata_cache = {}
# Create table and read in metadata.
# Should we use flags like 'r', 'w', instead?
# What we need to support is:
# - A 'throwaway' mode where the metadata is read each run.
# - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a preexisting db.
if create_table:
self.create_db_tables()
if metadata is not None:
self.consume_metadata(metadata)
        # Cache for lookup of assets by sid, the objects in the asset lookup may
# be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
        c.execute('CREATE INDEX futures_root_symbol on futures(root_symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def retrieve_all(self, sids, default_none=False):
return [self.retrieve_asset(sid, default_none) for sid in sids]
def _retrieve_equity(self, sid):
try:
return self._equity_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
c.row_factory = Row
t = (int(sid),)
c.execute(EQUITY_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
equity = Equity(**data)
else:
equity = None
self._equity_cache[sid] = equity
return equity
def _retrieve_futures_contract(self, sid):
try:
return self._future_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
t = (int(sid),)
c.row_factory = Row
c.execute(FUTURE_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
if data['notice_date']:
data['notice_date'] = pd.Timestamp(
data['notice_date'], tz='UTC')
if data['expiration_date']:
data['expiration_date'] = pd.Timestamp(
data['expiration_date'], tz='UTC')
future = Future(**data)
else:
future = None
self._future_cache[sid] = future
return future
def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
"""
Return matching Asset of name symbol in database.
If multiple Assets are found and as_of_date is not set,
raises MultipleSymbolsFound.
If no Asset was active at as_of_date, raises SymbolNotFound.
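Illustrative usage (a sketch only; ``finder`` is a hypothetical
AssetFinder whose metadata already contains an equity with symbol
'AAPL'):
>>> finder.lookup_symbol_resolve_multiple('AAPL')  # doctest: +SKIP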
"""
if as_of_date is not None:
as_of_date = pd.Timestamp(normalize_date(as_of_date))
c = self.conn.cursor()
if as_of_date:
# If one SID exists for symbol, return that symbol
t = (symbol, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
if len(candidates) == 0:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"order by end_date desc "
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc " +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
raise SymbolNotFound(symbol=symbol)
else:
t = (symbol,)
query = ("select sid from equities where symbol=?")
c.execute(query, t)
data = c.fetchall()
if len(data) == 1:
return self._retrieve_equity(data[0][0])
elif not data:
raise SymbolNotFound(symbol=symbol)
else:
options = []
for row in data:
sid = row[0]
asset = self._retrieve_equity(sid)
options.append(asset)
raise MultipleSymbolsFound(symbol=symbol,
options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
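Illustrative sketch (``finder`` is a hypothetical AssetFinder built
with fuzzy_char='_' whose metadata contains the symbol 'CMCS_A'):
>>> finder.lookup_symbol('CMCSA', pd.Timestamp('2015-01-02'),
...                      fuzzy=True)  # doctest: +SKIP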
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc" +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp or pd.NaT
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc. If NaT is given, the
chain is unbounded, and all contracts for this root symbol
are returned.
knowledge_date : pd.Timestamp or pd.NaT
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
on or before this date. If NaT is given and as_of_date is
is not NaT, the value of as_of_date is used for
knowledge_date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
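Examples
--------
Illustrative only; 'CL' is a hypothetical root symbol that is
assumed to already have futures metadata in this finder.
>>> finder.lookup_future_chain('CL',
...                            pd.Timestamp('2014-01-02', tz='UTC'),
...                            pd.NaT)  # doctest: +SKIP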
"""
c = self.conn.cursor()
if as_of_date is pd.NaT:
# If the as_of_date is NaT, get all contracts for this
# root symbol.
t = {'root_symbol': root_symbol}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
order by notice_date asc
""", t)
else:
if knowledge_date is pd.NaT:
# If knowledge_date is NaT, default to using as_of_date
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': as_of_date.value}
else:
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': knowledge_date.value}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
and :as_of_date < notice_date
and start_date <= :knowledge_date
order by notice_date asc
""", t)
sids = [r[0] for r in c.fetchall()]
if not sids:
# Check if root symbol exists.
c.execute("""
select count(sid) from futures where root_symbol=:root_symbol
""", t)
count = c.fetchone()[0]
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
else:
# If symbol exists, return empty future chain.
return []
return [self._retrieve_futures_contract(sid) for sid in sids]
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
try:
matches.append(
self.lookup_symbol_resolve_multiple(
asset_convertible,
as_of_date,
)
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
Convert an AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
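Illustrative sketch (hypothetical inputs; sid 24 and symbol 'AAPL'
are assumed to be known to this finder, 'XYZ' is not):
>>> matches, missing = finder.lookup_generic(
...     [24, 'AAPL', 'XYZ'], pd.Timestamp('2015-01-02'))  # doctest: +SKIP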
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
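Examples
--------
Illustrative only (hypothetical symbols that are assumed to resolve):
>>> finder.map_identifier_index_to_sids(
...     ['AAPL', 'MSFT'], pd.Timestamp('2015-01-02'))  # doctest: +SKIP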
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _insert_metadata(self, identifier, **kwargs):
"""
Inserts the given metadata kwargs to the entry for the given
identifier. Matching fields in the existing entry will be overwritten.
:param identifier: The identifier for which to insert metadata
:param kwargs: The keyed metadata to insert
"""
if identifier in self.metadata_cache:
# Multiple pass insertion no longer supported.
# This could and probably should raise an Exception, but is
# currently just a short-circuit for compatibility with existing
# testing structure in the test_algorithm module which creates
# multiple sources which all insert redundant metadata.
return
entry = {}
for key, value in kwargs.items():
# Do not accept invalid fields
if key not in ASSET_FIELDS:
continue
# Do not accept Nones
if value is None:
continue
# Do not accept empty strings
if value == '':
continue
# Do not accept nans from dataframes
if isinstance(value, float) and np.isnan(value):
continue
entry[key] = value
# Check if the sid is declared
try:
entry['sid']
except KeyError:
# If the identifier is not a sid, assign one
if hasattr(identifier, '__int__'):
entry['sid'] = identifier.__int__()
else:
if self.allow_sid_assignment:
# Assign the sid the value of its insertion order.
# This assumes that we are assigning values to all assets.
entry['sid'] = len(self.metadata_cache)
else:
raise SidAssignmentError(identifier=identifier)
# If the file_name is in the kwargs, it will be used as the symbol
try:
entry['symbol'] = entry.pop('file_name')
except KeyError:
pass
# If the identifier coming in was a string and there is no defined
# symbol yet, set the symbol to the incoming identifier
try:
entry['symbol']
pass
except KeyError:
if isinstance(identifier, string_types):
entry['symbol'] = identifier
# If the company_name is in the kwargs, it may be the asset_name
try:
company_name = entry.pop('company_name')
try:
entry['asset_name']
except KeyError:
entry['asset_name'] = company_name
except KeyError:
pass
# If dates are given as nanos, pop them
try:
entry['start_date'] = entry.pop('start_date_nano')
except KeyError:
pass
try:
entry['end_date'] = entry.pop('end_date_nano')
except KeyError:
pass
try:
entry['notice_date'] = entry.pop('notice_date_nano')
except KeyError:
pass
try:
entry['expiration_date'] = entry.pop('expiration_date_nano')
except KeyError:
pass
# Process dates to Timestamps
try:
entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
except KeyError:
# Set a default start_date of the EPOCH, so that all date queries
# work when a start date is not provided.
entry['start_date'] = pd.Timestamp(0, tz='UTC')
try:
# Set a default end_date of 'now', so that all date queries
# work when an end date is not provided.
entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
except KeyError:
entry['end_date'] = self.end_date_to_assign
try:
entry['notice_date'] = pd.Timestamp(entry['notice_date'],
tz='UTC')
except KeyError:
pass
try:
entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
tz='UTC')
except KeyError:
pass
# Build an Asset of the appropriate type, default to Equity
asset_type = entry.pop('asset_type', 'equity')
if asset_type.lower() == 'equity':
try:
fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
if self.fuzzy_char else None
except KeyError:
fuzzy = None
asset = Equity(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
fuzzy)
c.execute("""INSERT INTO equities(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
fuzzy)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'equity')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
elif asset_type.lower() == 'future':
asset = Future(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
asset.root_symbol,
asset.notice_date.value if asset.notice_date else None,
asset.expiration_date.value
if asset.expiration_date else None,
asset.contract_multiplier)
c.execute("""INSERT INTO futures(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
root_symbol,
notice_date,
expiration_date,
contract_multiplier)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'future')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
else:
raise InvalidAssetType(asset_type=asset_type)
self.metadata_cache[identifier] = entry
def consume_identifiers(self, identifiers):
"""
Consumes the given identifiers into the metadata cache of this
AssetFinder.
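Example (illustrative; identifiers may be sids, symbols, or Assets):
>>> finder.consume_identifiers([24, 'AAPL'])  # doctest: +SKIP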
"""
for identifier in identifiers:
# Handle case where full Assets are passed in
# For example, in the creation of a DataFrameSource, the source's
# 'sid' args may be full Assets
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
metadata['asset_type'] = identifier.__class__.__name__
self.insert_metadata(identifier=sid, **metadata)
else:
self.insert_metadata(identifier)
def consume_metadata(self, metadata):
"""
Consumes the provided metadata into the metadata cache. The
existing values in the cache will be overwritten when there
is a conflict.
:param metadata: The metadata to be consumed
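Example of the dict form (illustrative entries only; keys are
identifiers, values are dicts of ASSET_FIELDS):
>>> finder.consume_metadata({
...     1: {'symbol': 'AAPL', 'asset_type': 'equity'},
...     2: {'symbol': 'CLF14', 'root_symbol': 'CL',
...         'asset_type': 'future'},
... })  # doctest: +SKIP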
"""
# Handle dicts
if isinstance(metadata, dict):
self._insert_metadata_dict(metadata)
# Handle DataFrames
elif isinstance(metadata, pd.DataFrame):
self._insert_metadata_dataframe(metadata)
# Handle readables
elif hasattr(metadata, 'read'):
self._insert_metadata_readable(metadata)
else:
raise ConsumeAssetMetaDataError(obj=metadata)
def clear_metadata(self):
"""
Used for testing.
"""
self.metadata_cache = {}
self.conn = sqlite3.connect(':memory:')
self.create_db_tables()
def insert_metadata(self, identifier, **kwargs):
self._insert_metadata(identifier, **kwargs)
self.conn.commit()
def _insert_metadata_dataframe(self, dataframe):
for identifier, row in dataframe.iterrows():
self._insert_metadata(identifier, **row)
self.conn.commit()
def _insert_metadata_dict(self, dict):
for identifier, entry in dict.items():
self._insert_metadata(identifier, **entry)
self.conn.commit()
def _insert_metadata_readable(self, readable):
for row in readable.read():
# Parse out the row of the readable object
metadata_dict = {}
for field in ASSET_FIELDS:
try:
row_value = row[field]
# Avoid passing placeholders
if row_value and (row_value != 'None'):
metadata_dict[field] = row[field]
except KeyError:
continue
except IndexError:
continue
# Locate the identifier, fail if not found
if 'sid' in metadata_dict:
identifier = metadata_dict['sid']
elif 'symbol' in metadata_dict:
identifier = metadata_dict['symbol']
else:
raise ConsumeAssetMetaDataError(obj=row)
self._insert_metadata(identifier, **metadata_dict)
self.conn.commit()
def _compute_asset_lifetimes(self):
"""
Compute and cache a recarray of asset lifetimes.
FUTURE OPTIMIZATION: We're looping over a big array, which means this
probably should be in C/Cython.
"""
with self.conn as transaction:
results = transaction.execute(
'SELECT sid, start_date, end_date from equities'
).fetchall()
lifetimes = np.recarray(
shape=(len(results),),
dtype=[('sid', 'i8'), ('start', 'i8'), ('end', 'i8')],
)
# TODO: This is **WAY** slower than it could be because we have to
# check for None everywhere. If we represented "no start date" as
# 0, and "no end date" as MAX_INT in our metadata, this would be
# significantly faster.
NO_START = 0
NO_END = np.iinfo(int).max
for idx, (sid, start, end) in enumerate(results):
lifetimes[idx] = (
sid,
start if start is not None else NO_START,
end if end is not None else NO_END,
)
return lifetimes
def lifetimes(self, dates):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`.
See Also
--------
numpy.putmask
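Examples
--------
Illustrative only (assumes a populated finder):
>>> sessions = pd.date_range('2014-01-02', '2014-01-06', tz='UTC')
>>> finder.lifetimes(sessions)  # doctest: +SKIP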
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = dates.asi8[:, None]
mask = (lifetimes.start <= raw_dates) & (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
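Example of the registration effect (illustrative):
>>> isinstance(24, AssetConvertible)  # doctest: +SKIP
True
>>> isinstance('AAPL', AssetConvertible)  # doctest: +SKIP
True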
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
| apache-2.0 |
vivekmishra1991/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
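Examples
--------
A minimal sketch on synthetic data (illustrative only, not from the
reference article):
>>> import numpy as np
>>> from sklearn.linear_model import lasso_stability_path
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(60, 10)
>>> y = X[:, 0] + 0.1 * rng.randn(60)
>>> alpha_grid, scores_path = lasso_stability_path(
...     X, y, random_state=0, n_resampling=20)  # doctest: +SKIP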
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
faroit/loudness | python/tests/test_OME.py | 1 | 2084 | import numpy as np
import matplotlib.pyplot as plt
import loudness as ln
def plotResponse(freqPoints, dataPoints,
freqsInterp, responseInterp,
ylim=(-40, 10), title = ""):
if np.any(dataPoints):
plt.semilogx(freqPoints, dataPoints, 'o')
plt.semilogx(freqsInterp, responseInterp)
plt.xlim(20, 20e3)
plt.ylim(ylim)
plt.xlabel("Frequency, Hz")
plt.ylabel("Response, dB")
plt.title(title)
plt.show()
def plotMiddleEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(filterType, ln.OME.NONE)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getMiddleEarFreqPoints()
dataPoints = ome.getMiddleEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotOuterEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(ln.OME.NONE, filterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getOuterEarFreqPoints()
dataPoints = ome.getOuterEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotCombined(middleFilterType, outerFilterType, ylim=(-40, 10)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(middleFilterType, outerFilterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
plotResponse(None, None,
freqs, response, ylim)
plt.figure(1)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR, (-40, 0))
plt.figure(2)
plotMiddleEar(ln.OME.CHGM2011_MIDDLE_EAR, (-40, 10))
plt.figure(2)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR_HPF, (-40, 0))
plt.figure(3)
plotOuterEar(ln.OME.ANSIS342007_FREEFIELD, (-5, 20))
plt.figure(4)
plotOuterEar(ln.OME.ANSIS342007_DIFFUSEFIELD, (-5, 20))
plt.figure(5)
plotOuterEar(ln.OME.BD_DT990, (-10, 10))
plt.figure(6)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR,
ln.OME.ANSIS342007_FREEFIELD, (-40, 10))
plt.figure(7)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.BD_DT990, (-40, 10))
| gpl-3.0 |
jrshust/spark | python/setup.py | 25 | 9659 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.4'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
xyguo/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each component
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
FernanOrtega/DAT210x | Module2/assignment3.py | 1 | 1065 | import pandas as pd
# TODO: Load up the dataset
# Ensuring you set the appropriate header column names
#
df = pd.read_csv('Datasets/servo.data', names=['motor', 'screw', 'pgain', 'vgain', 'class'])
print df.head()
# TODO: Create a slice that contains all entries
# having a vgain equal to 5. Then print the
# length of (# of samples in) that slice:
#
df_vgain = df[df.vgain == 5]
print df_vgain.iloc[:,0].count()
# TODO: Create a slice that contains all entries
# having a motor equal to E and screw equal
# to E. Then print the length of (# of
# samples in) that slice:
#
# .. your code here ..
df_eq = df[(df.motor == 'E') & (df.screw == 'E')]
print df_eq.iloc[:,0].count()
# TODO: Create a slice that contains all entries
# having a pgain equal to 4. Use one of the
# various methods of finding the mean vgain
# value for the samples in that slice. Once
# you've found it, print it:
#
df_pgain = df[df.pgain == 4]
print df_pgain.vgain.mean(0)
# TODO: (Bonus) See what happens when you run
# the .dtypes method on your dataframe!
print df.dtypes
| mit |
DeepVisionTeam/TensorFlowBook | Titanic/data_processing.py | 2 | 4807 | import os
import re
import pandas as pd
import tensorflow as tf
pjoin = os.path.join
DATA_DIR = pjoin(os.path.dirname(__file__), 'data')
train_data = pd.read_csv(pjoin(DATA_DIR, 'train.csv'))
test_data = pd.read_csv(pjoin(DATA_DIR, 'test.csv'))
# Translation:
# Don: an honorific title used in Spain, Portugal, Italy
# Dona: Feminine form for don
# Mme: Madame, Mrs
# Mlle: Mademoiselle, Miss
# Jonkheer (female equivalent: Jonkvrouw) is a Dutch honorific of nobility
HONORABLE_TITLES = ['sir', 'lady', 'don', 'dona', 'countess', 'jonkheer',
'major', 'col', 'dr', 'master', 'capt']
NORMAL_TITLES = ['mr', 'ms', 'mrs', 'miss', 'mme', 'mlle', 'rev']
TITLES = HONORABLE_TITLES + NORMAL_TITLES
def get_title(name):
title_search = re.search(r'([A-Za-z]+)\.', name)
return title_search.group(1).lower()
def get_family(row):
last_name = row['Name'].split(",")[0]
if last_name:
family_size = 1 + row['Parch'] + row['SibSp']
if family_size > 3:
return "{0}_{1}".format(last_name.lower(), family_size)
else:
return "nofamily"
else:
return "unknown"
def get_deck(cabin):
if pd.isnull(cabin):
return 'U'
return cabin[:1]
class TitanicDigest(object):
def __init__(self, dataset):
self.count_by_sex = dataset.groupby('Sex')['PassengerId'].count()
self.mean_age = dataset['Age'].mean()
self.mean_age_by_sex = dataset.groupby("Sex")["Age"].mean()
self.mean_fare_by_class = dataset.groupby("Pclass")["Fare"].mean()
self.titles = TITLES
self.families = dataset.apply(get_family, axis=1).unique().tolist()
self.decks = dataset["Cabin"].apply(get_deck).unique().tolist()
self.embarkments = dataset.Embarked.unique().tolist()
self.embark_mode = dataset.Embarked.dropna().mode().values
def preprocess(data, digest):
# convert ['male', 'female'] values of Sex to [1, 0]
data['Sex'] = data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
# fill empty age field with mean age
data['Age'] = data['Age'].apply(
lambda age: digest.mean_age if pd.isnull(age) else age)
# is child flag
data['Child'] = data['Age'].apply(lambda age: 1 if age <= 15 else 0)
# fill fare with mean fare of the class
def get_fare_value(row):
if pd.isnull(row['Fare']):
return digest.mean_fare_by_class[row['Pclass']]
else:
return row['Fare']
data['Fare'] = data.apply(get_fare_value, axis=1)
# fill Embarked with mode
data['Embarked'] = data['Embarked'].apply(
lambda e: digest.embark_mode if pd.isnull(e) else e)
data["EmbarkedF"] = data["Embarked"].apply(digest.embarkments.index)
#
data['Cabin'] = data['Cabin'].apply(lambda c: 'U0' if pd.isnull(c) else c)
# Deck
data["Deck"] = data["Cabin"].apply(lambda cabin: cabin[0])
data["DeckF"] = data['Deck'].apply(digest.decks.index)
data['Title'] = data['Name'].apply(get_title)
data['TitleF'] = data['Title'].apply(digest.titles.index)
data['Honor'] = data['Title'].apply(
lambda title: int(title in HONORABLE_TITLES))
data['Family'] = data.apply(get_family, axis=1)
if 'Survived' in data.keys():
data['Deceased'] = data['Survived'].apply(lambda s: int(not s))
return data
digest = TitanicDigest(train_data)
def get_train_data():
return preprocess(train_data, digest)
def get_test_data():
return preprocess(test_data, digest)
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def transform_to_tfrecord():
data = pd.read_csv(pjoin(DATA_DIR, 'train.csv'))
filepath = pjoin(DATA_DIR, 'data.tfrecords')
writer = tf.python_io.TFRecordWriter(filepath)
for i in range(len(data)):
feature = {}
for key in data.keys():
value = data[key][i]
if isinstance(value, int):
value = tf.train.Feature(
int64_list=tf.train.Int64List(value=[value]))
elif isinstance(value, float):
value = tf.train.Feature(
float_list=tf.train.FloatList(value=[value])
)
elif isinstance(value, str):
value = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[value.encode(encoding="utf-8")])
)
feature[key] = value
example = tf.train.Example(
features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
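# Sketch of reading the records back (assumes the same TF 1.x tf.python_io API used
# above; 'Survived' is a column of train.csv): iterate the serialized
# tf.train.Example protos and parse the first one.
def read_tfrecord_example():
    filepath = pjoin(DATA_DIR, 'data.tfrecords')
    for record in tf.python_io.tf_record_iterator(filepath):
        example = tf.train.Example.FromString(record)
        print(example.features.feature['Survived'])
        break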
if __name__ == '__main__':
transform_to_tfrecord()
| apache-2.0 |
tomzw11/Pydrone | route.py | 1 | 2000 | import matplotlib.pyplot as plt
import matplotlib.patches as patches
def route(root):
root_height = root[2]
coordinates = [\
[0.42*root_height+root[0],0.42*root_height+root[1],root_height/2],\
[-0.42*root_height+root[0],0.42*root_height+root[1],root_height/2],\
[-0.42*root_height+root[0],-0.15*root_height+root[1],root_height/2],\
[0.42*root_height+root[0],-0.15*root_height+root[1],root_height/2]]
return coordinates
if __name__ == "__main__":
meter_to_feet = 3.28
root = [0,0,16*1]
print 'root',root,'\n'
level1 = route(root)
print 'level 1 \n'
print level1[0],'\n'
print level1[1],'\n'
print level1[2],'\n'
print level1[3],'\n'
print 'level 2 \n'
level2 = [[0]*3]*4
for x in xrange(4):
level2[x] = route(level1[x])
for y in xrange(4):
print 'level2 point[',x+1,y+1,']',level2[x][y],'\n'
fig, ax = plt.subplots()
ball, = plt.plot(6.72+1.52,6.72+1.52,'mo')
plt.plot(0,0,'bo')
plt.plot([level1[0][0],level1[1][0],level1[2][0],level1[3][0]],[level1[0][1],level1[1][1],level1[2][1],level1[3][1]],'ro')
rect_blue = patches.Rectangle((-13.44,-4.8),13.44*2,9.12*2,linewidth=1,edgecolor='b',facecolor='b',alpha = 0.1)
ax.add_patch(rect_blue)
rect_red = patches.Rectangle((0,4.23),13.44,9.12,linewidth=1,edgecolor='r',facecolor='r',alpha = 0.3)
ax.add_patch(rect_red)
plt.plot([level2[0][0][0],level2[0][1][0],level2[0][2][0],level2[0][3][0]],[level2[0][0][1],level2[0][1][1],level2[0][2][1],level2[0][3][1]],'go')
rect_green = patches.Rectangle((6.72,6.72+4.23/2),13.44/2,9.12/2,linewidth=1,edgecolor='g',facecolor='g',alpha = 0.5)
ax.add_patch(rect_green)
linear_s = [12,12]
plt.plot(12,12,'yo')
rect_yellow = patches.Rectangle((10,11),13.44/4,9.12/4,linewidth=1,edgecolor='y',facecolor='y',alpha = 0.5)
ax.add_patch(rect_yellow)
ax.legend([ball,rect_blue,rect_red,rect_green,rect_yellow],['Ball','Root View','Level 1 - 4 anchors','Level 2 - 16 anchors','Linear Search - 64 anchors'])
plt.axis([-13.44, 13.44, -4.8, 13.44])
plt.show()
| mit |
moutai/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
MysterionRise/fantazy-predictor | enriching_data.py | 1 | 10896 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import calendar
import os
import pandas as pd
# Scoring rules:
#
# for taking part in a game: 2 points if 10 minutes or more were played; 1 point if less than 10 minutes were played
#
# for a win: 3 points (away); 2 points (at home)
#
# for a loss: minus 3 points (at home); minus 2 points (away)
#
# Points are then accumulated as follows:
#
# points scored + assists + steals + rebounds +
# blocks + free throws made + two-point field goals made + three-point field goals made
#
# - free throw attempts - two-point attempts - three-point attempts -
# twice the number of turnovers - fouls
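# Worked example (illustrative numbers): a home win with 25 minutes played, 20 pts,
# 5 ast, 2 stl, 7 trb, 1 blk, 4/5 FT, 7/14 FG, 2 TOV and 3 PF gives
# 2 (participation) + 2 (home win) + 20 + 5 + 2 + 7 + 1 + 4 + 7 - 5 - 14 - 2*2 - 3 = 24
# fantasy points, matching calc_fantasy() below (which uses total FG made/attempted
# rather than separate two- and three-point counts).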
def convert_to_sec(time_str):
if pd.isnull(time_str):
return 0
try:
m, s = time_str.split(':')
return int(m) * 60 + int(s)
except Exception as inst:
print(time_str)
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
def get_sec(row):
time_str = row['minutes']
return convert_to_sec(time_str)
def getOrDefault(value):
if pd.isnull(value):
return 0
return int(value)
def extractYear(row):
return row['date'].year
def extractMonth(row):
return row['date'].month
def extractDay(row):
return row['date'].day
def concat1(row):
return row['opponent'] + str(row['year'])
def concat2(row):
return row['team'] + str(row['year'])
def concat3(row):
return row['name'] + str(row['year'])
def concat4(row):
return row['opponent'] + str(row['month']) + str(row['year'])
def concat5(row):
return row['team'] + str(row['month']) + str(row['year'])
def concat6(row):
return row['name'] + str(row['month']) + str(row['year'])
def getDayOfTheWeek(row):
day = calendar.day_name[row['date'].weekday()]
return day[:3]
def convert_age(row):
if pd.isnull(row['age']):
return 0
years, days = row['age'].split('-')
return int(years) + 1.0 * int(days) / 365
def split_result(x):
try:
sp = x.split('(')
return sp[0].strip(), sp[1][:-1]
except Exception as inst:
print(x)
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
# (FG + 0.5 * 3P) / FGA
def calc_efg(row):
try:
fg = row['fg']
fg3 = row['fg3']
fga = row['fga']
if fga == 0:
return 0.0
return (fg + 0.5 * fg3) / fga
except Exception as inst:
print(row)
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
def calc_fantasy(row):
if pd.isnull(row['minutes']):
return 0
fantasy_points = 0
if convert_to_sec(row['minutes']) >= 10 * 60:
fantasy_points += 2
else:
fantasy_points += 1
if 'W' in str(row['result']):
if row['location'] == '@':
fantasy_points += 3
else:
fantasy_points += 2
else:
if row['location'] == '@':
fantasy_points -= 2
else:
fantasy_points -= 3
fantasy_points += getOrDefault(row['pts'])
fantasy_points += getOrDefault(row['ast'])
fantasy_points += getOrDefault(row['stl'])
fantasy_points += getOrDefault(row['trb'])
fantasy_points += getOrDefault(row['blk'])
fantasy_points += getOrDefault(row['ft'])
fantasy_points += getOrDefault(row['fg'])
fantasy_points -= getOrDefault(row['fta'])
fantasy_points -= getOrDefault(row['fga'])
fantasy_points -= 2 * getOrDefault(row['tov'])
fantasy_points -= getOrDefault(row['pf'])
return fantasy_points
def enrich_player_df(df):
df['fantasy_points'] = df.apply(lambda row: calc_fantasy(row), axis=1)
df['year'] = df.apply(lambda row: extractYear(row), axis=1)
df['month'] = df.apply(lambda row: extractMonth(row), axis=1)
df['day'] = df.apply(lambda row: extractDay(row), axis=1)
df['opponent2'] = df.apply(lambda row: concat1(row), axis=1)
df['opponent3'] = df.apply(lambda row: concat4(row), axis=1)
df['team3'] = df.apply(lambda row: concat5(row), axis=1)
df['name3'] = df.apply(lambda row: concat6(row), axis=1)
df['name2'] = df.apply(lambda row: concat3(row), axis=1)
df['team2'] = df.apply(lambda row: concat2(row), axis=1)
df['age1'] = df.apply(lambda row: convert_age(row), axis=1)
df['seconds'] = df.apply(lambda row: get_sec(row), axis=1)
for i in range(1, 6):
df['mean_pts_' + str(i)] = df['pts'].rolling(i).mean().shift(1)
df['efg'] = df.apply(lambda row: calc_efg(row), axis=1)
df['mefg'] = df['efg'].expanding().mean().shift(1)
df['day_of_the_week'] = df.apply(lambda row: getDayOfTheWeek(row), axis=1)
df['mfp'] = df['fantasy_points'].expanding().mean().shift(1)
df['medfp'] = df['fantasy_points'].expanding().median().shift(1)
df['msec'] = df['seconds'].expanding().mean().shift(1)
df['mpts'] = df['pts'].expanding().mean().shift(1)
df['mast'] = df['ast'].expanding().mean().shift(1)
df['mtrb'] = df['trb'].expanding().mean().shift(1)
df['mstl'] = df['stl'].expanding().mean().shift(1)
df['mpf'] = df['pf'].expanding().mean().shift(1)
df['mtov'] = df['tov'].expanding().mean().shift(1)
df['mblk'] = df['blk'].expanding().mean().shift(1)
df['mfg'] = df['fg'].expanding().mean().shift(1)
df['mfg3'] = df['fg3'].expanding().mean().shift(1)
df['mft'] = df['ft'].expanding().mean().shift(1)
df['mfg3_pct'] = df['fg3_pct'].expanding().mean().shift(1)
df['mfg_pct'] = df['fg_pct'].expanding().mean().shift(1)
df['mft_pct'] = df['ft_pct'].expanding().mean().shift(1)
    # days of rest since the previous game (rolling averages computed below)
df['rest_days'] = df['date'].diff().apply(lambda x: x.days)
for i in [1, 7, 10, 11, 12]:
df['mean_rest_days_' + str(i)] = df['rest_days'].rolling(i).mean().shift(1)
for i in [10, 21, 31, 38, 39]:
df['mean_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).mean().shift(1)
for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
df['mean_sec_' + str(i)] = df['seconds'].rolling(i).mean().shift(1)
for i in [3, 4, 12, 16, 17, 28, 36]:
df['skew_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).skew().shift(1)
return df
def enrich_player_df_for_upcoming_games(df):
df['fantasy_points'] = df.apply(lambda row: calc_fantasy(row), axis=1)
df['year'] = df.apply(lambda row: extractYear(row), axis=1)
df['month'] = df.apply(lambda row: extractMonth(row), axis=1)
df['day'] = df.apply(lambda row: extractDay(row), axis=1)
df['opponent2'] = df.apply(lambda row: concat1(row), axis=1)
df['opponent3'] = df.apply(lambda row: concat4(row), axis=1)
df['team3'] = df.apply(lambda row: concat5(row), axis=1)
df['name3'] = df.apply(lambda row: concat6(row), axis=1)
df['name2'] = df.apply(lambda row: concat3(row), axis=1)
df['team2'] = df.apply(lambda row: concat2(row), axis=1)
df['age1'] = df.apply(lambda row: convert_age(row), axis=1)
df['seconds'] = df.apply(lambda row: get_sec(row), axis=1)
for i in range(1, 6):
df['mean_pts_' + str(i)] = df['pts'].rolling(i).mean().shift(1)
df['efg'] = df.apply(lambda row: calc_efg(row), axis=1)
df['mefg'] = df['efg'].expanding().mean().shift(1)
df['day_of_the_week'] = df.apply(lambda row: getDayOfTheWeek(row), axis=1)
df['mfp'] = df['fantasy_points'].expanding().mean().shift(1)
df['medfp'] = df['fantasy_points'].expanding().median().shift(1)
df['msec'] = df['seconds'].expanding().mean().shift(1)
df['mpts'] = df['pts'].expanding().mean().shift(1)
df['mast'] = df['ast'].expanding().mean().shift(1)
df['mtrb'] = df['trb'].expanding().mean().shift(1)
df['mstl'] = df['stl'].expanding().mean().shift(1)
df['mpf'] = df['pf'].expanding().mean().shift(1)
df['mtov'] = df['tov'].expanding().mean().shift(1)
df['mblk'] = df['blk'].expanding().mean().shift(1)
df['mfg'] = df['fg'].expanding().mean().shift(1)
df['mfg3'] = df['fg3'].expanding().mean().shift(1)
df['mft'] = df['ft'].expanding().mean().shift(1)
df['mfg3_pct'] = df['fg3_pct'].expanding().mean().shift(1)
df['mfg_pct'] = df['fg_pct'].expanding().mean().shift(1)
df['mft_pct'] = df['ft_pct'].expanding().mean().shift(1)
    # days of rest since the previous game (rolling averages computed below)
df['rest_days'] = df['date'].diff().apply(lambda x: x.days)
for i in [1, 7, 10, 11, 12]:
df['mean_rest_days_' + str(i)] = df['rest_days'].rolling(i).mean().shift(1)
for i in [10, 21, 31, 38, 39]:
df['mean_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).mean().shift(1)
for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
df['mean_sec_' + str(i)] = df['seconds'].rolling(i).mean().shift(1)
for i in [3, 4, 12, 16, 17, 28, 36]:
df['skew_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).skew().shift(1)
return df
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
def enrich_all_data():
for root, dirs, files in os.walk("nba"):
for file in files:
if file.endswith(".csv"):
try:
path = os.path.join(root, file)
if path.find('fantasy') == -1 and path.find('2018.csv') != -1:
f = open(path)
print(path)
lines = f.readlines()
if len(lines) > 1:
df = pd.read_csv(path,
parse_dates=['date'],
date_parser=dateparse)
if not df.empty:
df.fillna(df.mean(), inplace=True)
df = enrich_player_df(df)
join = os.path.join(root, "fantasy")
if not os.path.exists(join):
os.mkdir(join)
df.to_csv(os.path.join(root, "fantasy", file), index=False)
except Exception as inst:
print(file)
print(df.head())
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
| mit |
bmazin/SDR | Projects/ChannelizerSim/legacy/bin_width_1st_stage.py | 1 | 1524 |
import matplotlib.pyplot as plt
import scipy.signal
import numpy as np
import math
import random
from matplotlib.backends.backend_pdf import PdfPages
samples = 51200
L = samples/512
fs = 512e6
dt = 1/fs
time = [i*dt for i in range(samples)]
def pfb_fir(x):
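    # Polyphase filter-bank front end with T = 4 taps per branch and L = 512 channels:
    # the prototype filter is a sinc (widened by bin_width_scale) times a Hanning
    # window, and each output sample is the sum of T input samples spaced L apart,
    # weighted by the decimated coefficients. The 512-point FFT applied to y further
    # down then forms the channelizer bins.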
N = len(x)
T = 4
L = 512
bin_width_scale = 2.5
dx = T*math.pi/L/T
X = np.array([n*dx-T*math.pi/2 for n in range(T*L)])
coeff = np.sinc(bin_width_scale*X/math.pi)*np.hanning(T*L)
y = np.array([0+0j]*(N-T*L))
for n in range((T-1)*L, N):
m = n%L
coeff_sub = coeff[L*T-m::-L]
y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum()
return y
R = 100/5
#freqs = [i*1e5 + 6.0e6 for i in range(R)]
freqs = [i*5e4 + 6.0e6 for i in range(R*8)]
bin = []
bin_pfb = []
for f in freqs:
print f
signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time])
y = pfb_fir(signal)
bin_pfb.append(np.fft.fft(y[0:512])[10])
bin = np.array(bin)
bin_pfb = np.array(bin_pfb)
freqs = np.array(freqs)/1e6
b = scipy.signal.firwin(20, cutoff=0.125, window="hanning")
w,h = scipy.signal.freqz(b,1, 4*R, whole=1)
h = np.array(h[2*R:4*R].tolist()+h[0:2*R].tolist())
#h = np.array(h[20:40].tolist()+h[0:20].tolist())
fig = plt.figure()
ax0 = fig.add_subplot(111)
#ax0.plot(freqs, abs(fir9), '.', freqs, abs(fir10), '.', freqs, abs(fir11), '.')
ax0.plot(freqs, 10*np.log10(abs(bin_pfb)/512), 'k-')
ax0.set_xlabel('Frequency (MHz)')
ax0.set_ylabel('Gain (dB)')
ax0.set_ylim((-50,0))
plt.show()
#ax0.axvline(x = 10, linewidth=1, color='k')
| gpl-2.0 |
nistats/nistats | examples/03_second_level_models/plot_oasis.py | 1 | 6030 | """Voxel-Based Morphometry on Oasis dataset
========================================
This example uses Voxel-Based Morphometry (VBM) to study the relationship
between aging, sex and gray matter density.
The data come from the `OASIS <http://www.oasis-brains.org/>`_ project.
If you use it, you need to agree with the data usage agreement available
on the website.
It has been run through a standard VBM pipeline (using SPM8 and
NewSegment) to create VBM maps, which we study here.
VBM analysis of aging
---------------------
We run a standard GLM analysis to study the association between age
and gray matter density from the VBM data. We use only 100 subjects
from the OASIS dataset to limit the memory usage.
Note that more power would be obtained from using a larger sample of subjects.
"""
# Authors: Bertrand Thirion, <bertrand.thirion@inria.fr>, July 2018
# Elvis Dhomatob, <elvis.dohmatob@inria.fr>, Apr. 2014
# Virgile Fritsch, <virgile.fritsch@inria.fr>, Apr 2014
# Gael Varoquaux, Apr 2014
n_subjects = 100 # more subjects requires more memory
############################################################################
# Load Oasis dataset
# ------------------
from nilearn import datasets
oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
gray_matter_map_filenames = oasis_dataset.gray_matter_maps
age = oasis_dataset.ext_vars['age'].astype(float)
###############################################################################
# Sex is encoded as 'M' or 'F'. Hence, we make it a binary variable.
sex = oasis_dataset.ext_vars['mf'] == b'F'
###############################################################################
# Print basic information on the dataset.
print('First gray-matter anatomy image (3D) is located at: %s' %
oasis_dataset.gray_matter_maps[0]) # 3D data
print('First white-matter anatomy image (3D) is located at: %s' %
oasis_dataset.white_matter_maps[0]) # 3D data
###############################################################################
# Get a mask image: A mask of the cortex of the ICBM template.
gm_mask = datasets.fetch_icbm152_brain_gm_mask()
###############################################################################
# Resample the images, since this mask has a different resolution.
from nilearn.image import resample_to_img
mask_img = resample_to_img(
gm_mask, gray_matter_map_filenames[0], interpolation='nearest')
#############################################################################
# Analyse data
# ------------
#
# First, we create an adequate design matrix with three columns: 'age',
# 'sex', 'intercept'.
import pandas as pd
import numpy as np
intercept = np.ones(n_subjects)
design_matrix = pd.DataFrame(np.vstack((age, sex, intercept)).T,
columns=['age', 'sex', 'intercept'])
#############################################################################
# Let's plot the design matrix.
from nistats.reporting import plot_design_matrix
ax = plot_design_matrix(design_matrix)
ax.set_title('Second level design matrix', fontsize=12)
ax.set_ylabel('maps')
##########################################################################
# Next, we specify and fit the second-level model when loading the data and also
# smooth a little bit to improve statistical behavior.
from nistats.second_level_model import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask_img=mask_img)
second_level_model.fit(gray_matter_map_filenames,
design_matrix=design_matrix)
##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
output_type='z_score')
###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from nistats.thresholding import map_threshold
from nilearn import plotting
_, threshold = map_threshold(
z_map, alpha=.05, height_control='fdr')
print('The FDR=.05-corrected threshold is: %.3g' % threshold)
display = plotting.plot_stat_map(
z_map, threshold=threshold, colorbar=True, display_mode='z',
cut_coords=[-4, 26],
title='age effect on grey matter density (FDR = .05)')
plotting.show()
###########################################################################
# We can also study the effect of sex by computing the contrast, thresholding it
# and plot the resulting map.
z_map = second_level_model.compute_contrast(second_level_contrast='sex',
output_type='z_score')
_, threshold = map_threshold(
z_map, alpha=.05, height_control='fdr')
plotting.plot_stat_map(
z_map, threshold=threshold, colorbar=True,
title='sex effect on grey matter density (FDR = .05)')
###########################################################################
# Note that there does not seem to be any significant effect of sex on
# grey matter density on that dataset.
###########################################################################
# Generating a report
# -------------------
# It can be useful to quickly generate a
# portable, ready-to-view report with most of the pertinent information.
# This is easy to do if you have a fitted model and the list of contrasts,
# which we do here.
from nistats.reporting import make_glm_report
icbm152_2009 = datasets.fetch_icbm152_2009()
report = make_glm_report(model=second_level_model,
contrasts=['age', 'sex'],
bg_img=icbm152_2009['t1'],
)
#########################################################################
# We have several ways to access the report:
# report # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
| bsd-3-clause |
MPIBGC-TEE/CompartmentalSystems | notebooks/ELM_dask.py | 1 | 1730 | #from dask.distributed import Client
import xarray as xr
import numpy as np
import pandas as pd
import importlib
import ELMlib
importlib.reload(ELMlib)
#client = Client(n_workers=2, threads_per_worker=2, memory_limit='1GB')
#client
#ds = xr.open_dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
from netCDF4 import Dataset
ds = Dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
#lat, lon = ds.coords['lat'], ds.coords['lon']
lat, lon = ds['lat'][:], ds['lon'][:]
lat_indices, lon_indices = np.meshgrid(
range(len(lat)),
range(len(lon)),
indexing='ij'
)
lats, lons = np.meshgrid(lat, lon, indexing='ij')
df_pd = pd.DataFrame(
{
'cell_nr': range(len(lat)*len(lon)),
'lat_index': lat_indices.flatten(),
'lon_index': lon_indices.flatten(),
'lat': lats.flatten(),
'lon': lons.flatten()
}
)
import dask.array as da
import dask.dataframe as dask_df
df_dask = dask_df.from_pandas(df_pd, npartitions=4)
df_dask
parameter_set = ELMlib.load_parameter_set(
ds_filename = '../Data/14C_spinup_holger_fire.2x2_small.nc',
time_shift = -198*365,
nstep = 10
)
def func(line):
location_dict = {
'cell_nr': int(line.cell_nr),
'lat_index': int(line.lat_index),
'lon_index': int(line.lon_index)
}
cell_nr, log, xs_12C_data, us_12C_data, rs_12C_data= ELMlib.load_model_12C_data(parameter_set, location_dict)
return cell_nr, log, xs_12C_data, us_12C_data, rs_12C_data
df_dask_2 = df_dask.apply(func, axis=1, meta=('A', 'object'))
df_dask_2.compute()
type(df_dask_2)
df_dask_2
list(df_dask_2)
pd.DataFrame(list(df_dask_2), columns=('cell_nr', 'log', 'xs_12C_data', 'us_12C_data', 'rs_12C_data'))
| mit |
ky822/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
cpcloud/ibis | ibis/pandas/execution/tests/test_structs.py | 1 | 2175 | from collections import OrderedDict
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value):
df = pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
)
return ibis.pandas.connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct.fruit
result = ibis.pandas.execute(expr)
assert result == "pear"
expr = struct.weight
result = ibis.pandas.execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s.fruit
result = expr.execute()
expected = pd.Series(["apple", "pear", "pear"], name="fruit")
tm.assert_series_equal(result, expected)
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s.fruit).aggregate(total=t.value.sum())
result = expr.execute()
expected = pd.DataFrame(
[("apple", 1), ("pear", 5)], columns=["fruit", "total"]
)
tm.assert_frame_equal(result, expected)
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s.weight.sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"])
tm.assert_frame_equal(result, expected)
| apache-2.0 |
mobarski/sandbox | rsm/v4.py | 2 | 5658 | from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise --
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# - sklearn -- compatibile api
# - prune -- if input < mem shrink mem ? (problem with m > input len
# IDEA:
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# NEXT VERSION:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
def __init__(self,n,m):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.N = n
self.M = m
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
# ---[ core ]---------------------------------------------------------------
# TODO -- input length vs mem length
def scores(self, input, boost=False, noise=False, fatigue=0, dropout=0.0): # -> dict[i] -> scores
"""
input -- sparse binary features
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
dropout -- temporal disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.N
M = self.M
t = self.t
scores = {}
for j in mem:
scores[j] = len(input & mem[j])
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
for j in combinations(N,k):
scores[j] = -1
return scores
def learn(self, input, k, decay=0.0, dropout=0.0, fatigue=0,
negative=False, boost=True, noise=True):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
M = self.M
t = self.t
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
scores = self.scores(input, boost=boost, noise=noise, dropout=dropout, fatigue=fatigue)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
	def fit(self, X, Y, k=1):
		for x, y in zip(X, Y):
			negative = not y
			self.learn(x, k, negative=negative)
def score_many(self, X, k=1, method=1):
out = []
for x in X:
s = self.score_one(x,k,method)
out += [s]
return out
def transform(self, X, k=1, method=1, cutoff=0.5):
out = []
for s in self.score_many(X,k,method):
y = 1 if s>=cutoff else 0
out += [y]
return out
def confusion(self, X, Y, k=1, method=1, cutoff=0.5):
PY = self.transform(X,k,method,cutoff)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score(self, X, Y, k=1, method=1, cutoff=0.5, kind='acc'):
c = self.confusion(X,Y,k,method,cutoff)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
if kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='acc':
return (tp+tn) / (p+n)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
def score_one(self, input, k=1, method=1):
"aggregate scores to scalar"
scores = self.scores(input)
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(self.M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*self.M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(self.M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/self.M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(self.M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/self.M
return min(1.0,score)
def stats(self,prefix=''):
mem_v = self.mem.values()
out = {}
out['m_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/self.N
out['m_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/self.N
out['m_full'] = sum([1.0 if len(x)==self.M else 0.0 for x in mem_v])/self.N
out['m_avg'] = sum([1.0*len(x) for x in mem_v])/(self.N*self.M)
return {k:v for k,v in out.items() if k.startswith(prefix)}
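# Minimal usage sketch (illustrative; inputs are sets of integer feature ids, labels
# are 0/1, and pick()/random() come from common2):
#
# m = rsm(64, 8)
# X = [set(pick(range(256), 16)) for _ in range(100)]
# Y = [int(random() < 0.5) for _ in X]
# m.fit(X, Y)
# print(m.score(X, Y, kind='acc'))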
| mit |
c11/yatsm | yatsm/classification/__init__.py | 3 | 2042 | """ Module storing classifiers for YATSM
Contains utilities and helper classes for classifying timeseries generated
using YATSM change detection.
"""
import logging
from sklearn.ensemble import RandomForestClassifier
import yaml
from ..errors import AlgorithmNotFoundException
logger = logging.getLogger('yatsm')
_algorithms = {
'RandomForest': RandomForestClassifier
}
def cfg_to_algorithm(config_file):
""" Return instance of classification algorithm helper from config file
Args:
config_file (str): location of configuration file for algorithm
Returns:
tuple: scikit-learn estimator (object) and configuration file (dict)
Raises:
KeyError: raise if configuration file is malformed
AlgorithmNotFoundException: raise if algorithm is not implemented in
YATSM
TypeError: raise if configuration file cannot be used to initialize
the classifier
"""
# Determine which algorithm is used
try:
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
except Exception as e:
logger.error('Could not read config file {} ({})'
.format(config_file, str(e)))
raise
algo_name = config['algorithm']
if algo_name not in _algorithms.keys():
raise AlgorithmNotFoundException(
'Could not process unknown algorithm named "%s"' % algo_name)
else:
algo = _algorithms[algo_name]
if algo_name not in config:
logger.warning('%s algorithm parameters not found in config file %s. '
'Using default values.' % (algo_name, config_file))
config[algo_name] = {}
# Try to load algorithm using hyperparameters from config
try:
sklearn_algo = algo(**config[algo_name].get('init', {}))
except TypeError:
logger.error('Cannot initialize %s classifier. Config file %s '
'contains unknown options' % (algo_name, config_file))
raise
return sklearn_algo, config
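# Example configuration file (illustrative) as consumed by cfg_to_algorithm:
#
# algorithm: RandomForest
# RandomForest:
#     init:
#         n_estimators: 500
#         n_jobs: -1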
| mit |
SKIRT/PTS | magic/plot/imagegrid.py | 1 | 106384 | # -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.plot.imagegrid Contains the ImageGridPlotter classes.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import aplpy
from abc import ABCMeta, abstractproperty
import matplotlib.pyplot as plt
from matplotlib import cm
from collections import OrderedDict, defaultdict
# Import the relevant PTS classes and modules
from ..tools.plotting import get_vmin_vmax
from ...core.tools import filesystem as fs
from ..core.frame import Frame
from ...core.basics.log import log
from ...core.basics.configurable import Configurable
from ...core.tools.utils import lazyproperty, memoize_method
from ...core.tools import sequences
from ..core.image import Image
from ...core.basics.distribution import Distribution
from ...core.basics.plot import MPLFigure
from ...core.basics.composite import SimplePropertyComposite
from ...core.basics.plot import normal_colormaps
from ..core.list import uniformize
from ...core.tools import numbers
from ...core.tools import types
# ------------------------------------------------------------------------------
light_theme = "light"
dark_theme = "dark"
themes = [light_theme, dark_theme]
# ------------------------------------------------------------------------------
default_cmap = "inferno"
default_residual_cmap = 'RdBu'
default_absolute_residual_cmap = "OrRd"
# ------------------------------------------------------------------------------
# Initialize dictionary for light theme settings
light_theme_settings = OrderedDict()
# Set parameters
light_theme_settings['axes.facecolor'] = 'white'
light_theme_settings['savefig.facecolor'] = 'white'
light_theme_settings['axes.edgecolor'] = 'black'
light_theme_settings['xtick.color'] = 'black'
light_theme_settings['ytick.color'] = 'black'
light_theme_settings["axes.labelcolor"] = 'black'
light_theme_settings["text.color"] = 'black'
# light_theme_settings["axes.titlecolor"]='black'
# ------------------------------------------------------------------------------
# Initialize dictionary for dark theme settings
dark_theme_settings = OrderedDict()
# Set parameters
dark_theme_settings['axes.facecolor'] = 'black'
dark_theme_settings['savefig.facecolor'] = 'black'
dark_theme_settings['axes.edgecolor'] = 'white'
dark_theme_settings['xtick.color'] = 'white'
dark_theme_settings['ytick.color'] = 'white'
dark_theme_settings["axes.labelcolor"] ='white'
dark_theme_settings["text.color"] = 'white'
#plt.rcParams["axes.titlecolor"] = 'white'
# ------------------------------------------------------------------------------
class ImagePlotSettings(SimplePropertyComposite):
"""
This class ...
"""
__metaclass__ = ABCMeta
# ------------------------------------------------------------------------------
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ImagePlotSettings, self).__init__()
# Define properties
self.add_property("label", "string", "label for the image", None)
self.add_property("vmin", "real", "plotting minimum")
self.add_property("vmax", "real", "plotting maximum")
self.add_boolean_property("soft_vmin", "soft vmin", False) #, None) # use None as default to use plotter config if not defined
self.add_boolean_property("soft_vmax", "soft vmax", False) #, None) # use None as default to use plotter config if not defined
self.add_property("cmap", "string", "colormap", choices=normal_colormaps)
# ------------------------------------------------------------------------------
class ImageGridPlotter(Configurable):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ImageGridPlotter, self).__init__(*args, **kwargs)
# The figure
self.figure = None
# The grid
self.grid = None
# The plots
self.plots = None
# The settings
self.settings = defaultdict(self.image_settings_class)
# -----------------------------------------------------------------
@abstractproperty
def image_settings_class(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def names(self):
"""
This function ...
:return:
"""
pass
# ------------------------------------------------------------------------------
@property
def light(self):
return self.config.theme == light_theme
# -----------------------------------------------------------------
@property
def dark(self):
return self.config.theme == dark_theme
# -----------------------------------------------------------------
@lazyproperty
def text_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def frame_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def background_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "white"
# Dark theme
elif self.dark: return "black"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@abstractproperty
def first_frame(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@lazyproperty
def center(self):
"""
This function ...
:return:
"""
# Center coordinate is defined
if self.config.center is not None: return self.config.center
# Not defined?
return self.first_frame.center_sky
# -----------------------------------------------------------------
@property
def ra_center(self):
return self.center.ra
# ------------------------------------------------------------------------------
@property
def dec_center(self):
return self.center.dec
# ------------------------------------------------------------------------------
@lazyproperty
def ra_center_deg(self):
return self.ra_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def dec_center_deg(self):
return self.dec_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def spacing_deg(self):
return self.config.spacing.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def radius_deg(self):
return self.config.radius.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def colormap(self):
return cm.get_cmap(self.config.cmap)
# -----------------------------------------------------------------
@lazyproperty
def nan_color(self):
if self.config.nan_color is not None: return self.config.nan_color
else: return self.colormap(0)
# -----------------------------------------------------------------
@lazyproperty
def theme_settings(self):
if self.light: return light_theme_settings
elif self.dark: return dark_theme_settings
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ImageGridPlotter, self).setup(**kwargs)
# plt.rcParams.update({'font.size':20})
plt.rcParams["axes.labelsize"] = self.config.axes_label_size # 16 #default 20
plt.rcParams["xtick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["ytick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["legend.fontsize"] = self.config.legend_fontsize # 10 #default 14
plt.rcParams["legend.markerscale"] = self.config.legend_markers_cale
plt.rcParams["lines.markersize"] = self.config.lines_marker_size # 4 #default 4
plt.rcParams["axes.linewidth"] = self.config.linewidth
# Set theme-specific settings
for label in self.theme_settings: plt.rcParams[label] = self.theme_settings[label]
# plt.rcParams['xtick.major.size'] = 5
# plt.rcParams['xtick.major.width'] = 2
# plt.rcParams['ytick.major.size'] = 5
# plt.rcParams['ytick.major.width'] = 2
# ------------------------------------------------------------------------------
def plot_images(images, **kwargs):
"""
This function ...
:param images:
:param kwargs:
:return:
"""
# Create the plotter
plotter = StandardImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(images=images)
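# Minimal usage sketch (hypothetical file names; assumes the images are passed as a
# name -> Frame mapping, with Frame.from_file as the usual PTS loader):
#
#   from pts.magic.core.frame import Frame
#   images = {"FUV": Frame.from_file("fuv.fits"), "NUV": Frame.from_file("nuv.fits")}
#   plot_images(images)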
# -----------------------------------------------------------------
class StandardImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImagePlotSettings, self).__init__(**kwargs)
# Set properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
class StandardImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.frames = OrderedDict()
# The error frames
self.errors = OrderedDict()
# The masks
self.masks = OrderedDict()
# The regions
self.regions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
"""
This function ...
:return:
"""
return StandardImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
@property
def names(self):
"""
This function ...
:return:
"""
return self.frames.keys()
# ------------------------------------------------------------------------------
def add_image(self, name, image, errors=None, mask=None, regions=None, replace=False, settings=None):
"""
This function ...
:param name:
:param image:
:param errors:
:param mask:
:param regions:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Image is passed
if isinstance(image, Image):
# Get the frame
frame = image.primary
# Get errors?
# Get mask?
# Get regions?
# Frame is passed
elif isinstance(image, Frame): frame = image
# Invalid
else: raise ValueError("Invalid value for 'image': must be Frame or Image")
# Add frame
self.frames[name] = frame
# Add errors
if errors is not None: self.errors[name] = errors
# Add regions
if regions is not None: self.regions[name] = regions
# Add mask
if mask is not None: self.masks[name] = mask
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Images
if self.config.write_images: self.write_images()
# Frames
if self.config.write_frames: self.write_frames()
# Masks
if self.config.write_masks: self.write_masks()
# Regions
if self.config.write_regions: self.write_regions()
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_frames(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_masks(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_regions(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# ------------------------------------------------------------------------------
images_name = "images"
observations_name = "observations"
models_name = "models"
errors_name = "errors"
model_errors_name = "model_errors"
residuals_name = "residuals"
distributions_name = "distributions"
settings_name = "settings"
# ------------------------------------------------------------------------------
observation_name = "observation"
model_name = "model"
observation_or_model = [observation_name, model_name]
# ------------------------------------------------------------------------------
horizontal_mode, vertical_mode = "horizontal", "vertical"
default_direction = vertical_mode
directions = [horizontal_mode, vertical_mode]
# ------------------------------------------------------------------------------
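# Note: the names above serve two purposes. 'images', 'observations', 'models', 'errors',
# 'residuals' and 'settings' are used as input keyword names (see 'setup' of the
# ResidualImageGridPlotter) and/or as subdirectory names (see 'load_from_directory'), while
# 'observation', 'model', 'errors', 'model_errors' and 'residuals' also act as frame names
# inside a multi-frame Image. A sketch of the convention (the file path is hypothetical):
#
#   image = Image.from_file("m81_r.fits")
#   # image.frames may contain 'observation', 'model', 'errors', 'model_errors', 'residuals';
#   # ResidualImageGridPlotter.add_image distributes these frames over its dictionaries.
# ------------------------------------------------------------------------------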
class ResidualImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ResidualImagePlotSettings, self).__init__()
# Define properties
self.add_property("residual_amplitude", "percentage", "amplitude of the residual plots")
self.add_boolean_property("soft_residual_amplitude", "soft residual amplitude", False) #, None) # use None as default to use plotter config if not defined
self.add_property("residual_cmap", "string", "colormap for the residual plots") # no choices because can be absolute or not
# Set properties
self.set_properties(kwargs)
# ------------------------------------------------------------------------------
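# Sketch of how these settings are typically constructed and attached (illustrative; the
# property values and image name are hypothetical assumptions):
#
#   settings = ResidualImagePlotSettings(residual_amplitude=0.5, soft_residual_amplitude=True,
#                                        residual_cmap="RdBu")
#   plotter.set_settings("M81_r", settings)   # or: plotter.add_settings("M81_r", residual_cmap="RdBu")
# ------------------------------------------------------------------------------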
def plot_residuals(observations, models, **kwargs):
"""
This function plots a grid of observations, models, and their residuals.
:param observations: the observed frames (e.g. a dictionary of name -> frame or image)
:param models: the model frames (e.g. a dictionary of name -> frame or image)
:param kwargs: additional arguments for the ResidualImageGridPlotter
:return:
"""
# Create the plotter
plotter = ResidualImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(observations=observations, models=models)
# -----------------------------------------------------------------
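# Minimal usage sketch for the convenience function above (illustrative, not part of the
# original API): the image name and FITS file paths are hypothetical, and any extra keyword
# arguments to 'plot_residuals' are forwarded to the ResidualImageGridPlotter constructor.
def _example_plot_residuals():
    """
    Illustrative only: plot a single observation/model pair loaded from (hypothetical) files.
    """
    observations = OrderedDict()
    models = OrderedDict()
    observations["r"] = Frame.from_file("observed_r.fits")
    models["r"] = Frame.from_file("model_r.fits")
    plot_residuals(observations, models)
# ------------------------------------------------------------------------------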
class ResidualImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ResidualImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.observations = OrderedDict()
self.errors = OrderedDict()
self.models = OrderedDict()
self.model_errors = OrderedDict()
self.residuals = OrderedDict()
# The residual distributions
self.distributions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
return ResidualImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create the residual frames
self.create_residuals()
# Create the residual distributions
self.create_distributions()
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ResidualImageGridPlotter, self).setup(**kwargs)
# Load the images
if kwargs.get(images_name, None) is not None: self.add_images(kwargs.pop(images_name))
if kwargs.get(observations_name, None) is not None: self.add_observations(kwargs.pop(observations_name))
if kwargs.get(models_name, None) is not None: self.add_models(kwargs.pop(models_name))
if kwargs.get(errors_name, None) is not None: self.add_error_maps(kwargs.pop(errors_name))
if kwargs.get(residuals_name, None) is not None: self.add_residual_maps(kwargs.pop(residuals_name))
# Load from file: from the specified directory, or from the working directory when nothing was added
if self.config.from_directory is not None: self.load_from_directory(self.config.from_directory)
elif not self.has_images: self.load_from_directory(self.config.path)
# Initialize the figure
self.initialize_figure()
# ------------------------------------------------------------------------------
@property
def figsize(self):
return (15,10)
# ------------------------------------------------------------------------------
@property
def horizontal(self):
return self.config.direction == horizontal_mode
# ------------------------------------------------------------------------------
@property
def vertical(self):
return self.config.direction == vertical_mode
# ------------------------------------------------------------------------------
@lazyproperty
def npanels(self):
if self.config.distributions: return 4 # observation, model, residual, distribution
else: return 3 # observation, model, residual
# ------------------------------------------------------------------------------
@lazyproperty
def nrows(self):
if self.horizontal: return self.npanels
elif self.vertical: return self.nimages
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
@lazyproperty
def ncolumns(self):
if self.horizontal: return self.nimages
elif self.vertical: return self.npanels
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
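# Layout sketch for the properties above: with e.g. 3 images and distributions enabled
# (npanels = 4), the grid becomes
#   vertical   -> nrows = nimages = 3, ncolumns = npanels = 4 (one image per row)
#   horizontal -> nrows = npanels = 4, ncolumns = nimages = 3 (one image per column)
# ------------------------------------------------------------------------------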
@property
def share_x(self):
return True
# ------------------------------------------------------------------------------
@property
def share_y(self):
return True
# ------------------------------------------------------------------------------
def initialize_figure(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Initializing the figure with size " + str(self.figsize) + " ...")
# Create the plot
self.figure = MPLFigure(size=self.figsize)
# Create plots
#self.plots = self.figure.create_grid(self.nrows, self.ncolumns, sharex=self.share_x, sharey=self.share_y)
# Create grid
self.grid = self.figure.create_gridspec(self.nrows, self.ncolumns, hspace=0.0, wspace=0.0)
# Initialize structure to contain the plots
#print("NCOLUMNS", self.ncolumns)
#print("NROWS", self.nrows)
self.plots = [[None for i in range(self.ncolumns)] for j in range(self.nrows)]
# ------------------------------------------------------------------------------
@property
def all_names(self):
return sequences.combine_unique(self.observation_names, self.model_names, self.errors_names, self.residuals_names)
# ------------------------------------------------------------------------------
@property
def observation_names(self):
return self.observations.keys()
# ------------------------------------------------------------------------------
def has_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.observation_names
# ------------------------------------------------------------------------------
@property
def model_names(self):
return self.models.keys()
# ------------------------------------------------------------------------------
def has_model(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_names
# ------------------------------------------------------------------------------
@property
def errors_names(self):
return self.errors.keys()
# ------------------------------------------------------------------------------
def has_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.errors
# ------------------------------------------------------------------------------
@property
def model_errors_names(self):
return self.model_errors.keys()
# ------------------------------------------------------------------------------
def has_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_errors
# ------------------------------------------------------------------------------
@property
def residuals_names(self):
return self.residuals.keys()
# ------------------------------------------------------------------------------
def has_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.residuals
# ------------------------------------------------------------------------------
@property
def distribution_names(self):
return self.distributions.keys()
# ------------------------------------------------------------------------------
def has_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.distributions
# ------------------------------------------------------------------------------
@property
def settings_names(self):
return self.settings.keys()
# ------------------------------------------------------------------------------
def has_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.settings_names
# ------------------------------------------------------------------------------
@property
def names(self):
return self.observation_names
# ------------------------------------------------------------------------------
@property
def first_name(self):
return self.names[0]
# ------------------------------------------------------------------------------
@property
def first_observation(self):
return self.get_observation(self.first_name)
# ------------------------------------------------------------------------------
@property
def first_frame(self):
return self.first_observation
# ------------------------------------------------------------------------------
@property
def nimages(self):
return len(self.names)
# ------------------------------------------------------------------------------
@property
def has_images(self):
return self.nimages > 0
# ------------------------------------------------------------------------------
def add_image(self, name, observation, model=None, errors=None, model_errors=None, residuals=None, replace=False,
settings=None):
"""
This function ...
:param name:
:param observation:
:param model:
:param errors:
:param model_errors:
:param residuals:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Check the type of the image
if isinstance(observation, Image):
    image = observation
    # Get observation frame
    if observation_name in image.frame_names: observation = image.frames[observation_name]
    else: observation = image.primary
    # Get model frame
    if model_name in image.frame_names:
        if model is not None: raise ValueError("Cannot pass model frame if image contains model frame")
        model = image.frames[model_name]
    # Get errors frame
    if errors_name in image.frame_names:
        if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
        errors = image.frames[errors_name]
    # Get model errors frame
    if model_errors_name in image.frame_names:
        if model_errors is not None: raise ValueError("Cannot pass model error map if image contains model error map")
        model_errors = image.frames[model_errors_name]
    # Get residuals frame
    if residuals_name in image.frame_names:
        if residuals is not None: raise ValueError("Cannot pass residual map if image contains residual map")
        residuals = image.frames[residuals_name]
# Check the type of the model image
if model is not None and isinstance(model, Image):
    model_image = model
    # Get the model frame
    if model_name in model_image.frame_names: model = model_image.frames[model_name]
    else: model = model_image.primary
    # Get the model errors frame
    if model_errors_name in model_image.frame_names:
        if errors_name in model_image.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
        if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains model error map")
        model_errors = model_image.frames[model_errors_name]
    elif errors_name in model_image.frame_names:
        if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains error map")
        model_errors = model_image.frames[errors_name]
# Add observation
self.observations[name] = observation
# Add model
if model is not None: self.models[name] = model
# Add errors
if errors is not None: self.errors[name] = errors
# Add model errors
if model_errors is not None: self.model_errors[name] = model_errors
# Add residuals
if residuals is not None: self.residuals[name] = residuals
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def add_observation(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
    image = frame
    # Get observation frame
    if observation_name in image.frame_names: frame = image.frames[observation_name]
    else: frame = image.primary
    # Get error map
    if errors_name in image.frame_names:
        if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
        errors = image.frames[errors_name]
    # Check whether there are no other frames
    if sequences.contains_more(image.frame_names, ["primary", observation_name, errors_name]): raise ValueError("Observation image contains too many frames")
# Add observation frame
self.observations[name] = frame
# Add error map
if errors is not None: self.errors[name] = errors
# ------------------------------------------------------------------------------
def add_model(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
# Check the type of the image
if isinstance(frame, Image):
    image = frame
    # Get model frame
    if model_name in image.frame_names: frame = image.frames[model_name]
    else: frame = image.primary
    # Get error map
    if errors_name in image.frame_names:
        if model_errors_name in image.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
        if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
        errors = image.frames[errors_name]
    elif model_errors_name in image.frame_names:
        if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
        errors = image.frames[model_errors_name]
    # Check whether there are no other frames
    if sequences.contains_more(image.frame_names, ["primary", model_name, errors_name, model_errors_name]): raise ValueError("Model image contains too many frames")
# Add model frame
self.models[name] = frame
# Add error map
if errors is not None: self.model_errors[name] = errors
# ------------------------------------------------------------------------------
def add_errors(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.errors[name] = frame
# ------------------------------------------------------------------------------
def add_model_errors(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.model_errors[name] = frame
# ------------------------------------------------------------------------------
def add_residuals(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.residuals[name] = frame
# ------------------------------------------------------------------------------
def add_distribution(self, name, distribution):
"""
This function ...
:param name:
:param distribution:
:return:
"""
# Add
self.distributions[name] = distribution
# -----------------------------------------------------------------
def add_settings(self, name, **settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def set_settings(self, name, settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name] = settings
# ------------------------------------------------------------------------------
def set_setting(self, name, setting_name, value):
"""
This function ...
:param name:
:param setting_name:
:param value:
:return:
"""
# Set
self.settings[name][setting_name] = value
# ------------------------------------------------------------------------------
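# Sketch of how per-image settings are typically adjusted through the three helpers above
# (illustrative; the image name and values are hypothetical):
#
#   plotter.add_settings("r", vmin=1e-3, vmax=1e2)            # merge properties into the existing settings
#   plotter.set_setting("r", "soft_vmin", True)               # set a single property
#   plotter.set_settings("r", ResidualImagePlotSettings())    # replace the settings object entirely
# ------------------------------------------------------------------------------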
def add_images(self, images):
"""
This function ...
:param images:
:return:
"""
# Debugging
log.debug("Adding images ...")
# Loop over the images
for name in images:
# Get the image
image = images[name]
# Add
self.add_image(name, image)
# ------------------------------------------------------------------------------
def add_observations(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding observations ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_observation(name, frame)
# ------------------------------------------------------------------------------
def add_models(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding models ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_model(name, frame)
# ------------------------------------------------------------------------------
def add_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_errors(name, frame)
# ------------------------------------------------------------------------------
def add_model_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding model error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_model_errors(name, frame)
# ------------------------------------------------------------------------------
def add_residual_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding residual maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Are there FITS files in the directory?
if fs.has_files_in_path(path, extension="fits"): self.load_images_from_directory(path)
# Are there subdirectories?
elif fs.has_directories_in_path(path):
# Determine paths
images_path = fs.join(path, images_name)
observations_path = fs.join(path, observations_name)
models_path = fs.join(path, models_name)
residuals_path = fs.join(path, residuals_name)
settings_path = fs.join(path, settings_name)
# Load from the subdirectories
if fs.is_directory(images_path): self.load_images_from_directory(images_path)
if fs.is_directory(observations_path): self.load_observations_from_directory(observations_path)
if fs.is_directory(models_path): self.load_models_from_directory(models_path)
if fs.is_directory(residuals_path): self.load_residuals_from_directory(residuals_path)
if fs.is_directory(settings_path): self.load_settings_from_directory(settings_path)
# No FITS files nor subdirectories
else: raise IOError("No image files nor subdirectories found in '" + path + "'")
# ------------------------------------------------------------------------------
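# Expected input layout for 'load_from_directory' (sketch): either FITS files directly in the
# given directory, or subdirectories named after the module-level constants, e.g.
#
#   <path>/images/         multi-frame images (observation/model/errors/... frames)
#   <path>/observations/   observed frames
#   <path>/models/         model frames
#   <path>/residuals/      precomputed residual maps
#   <path>/settings/       per-image plotting settings (.dat files)
# ------------------------------------------------------------------------------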
def load_images_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading image files from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading '" + name + "' image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Add the image
self.add_image(name, image)
# ------------------------------------------------------------------------------
def load_observations_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading observed image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' observed image ...")
# Get header
#header = get_header(filepath)
# Get the filter
#fltr = get_filter(name, header=header)
# Check whether the filter is in the list of filters to be plotted
#if fltr not in config.filters: continue
# Get the index for this filter
#index = config.filters.index(fltr)
# Load the image
#frame = Frame.from_file(filepath)
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_observation(name, image)
# ------------------------------------------------------------------------------
def load_models_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading model image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' model image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_model(name, image)
# ------------------------------------------------------------------------------
def load_residuals_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading residual image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' residual map ...")
# Load the frame
frame = Frame.from_file(filepath)
# Add the map
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_settings_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading plotting settings from '" + path + "' ...")
# Loop over the dat files
for name, filepath in fs.files_in_path(path, extension="dat", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' settings ...")
# Load the settings
settings = ImagePlotSettings.from_file(filepath)
# Set the settings
self.set_settings(name, settings)
# ------------------------------------------------------------------------------
def get_observation_or_model(self, name):
"""
This function ...
:param name:
:return:
"""
if self.has_observation(name): return self.get_observation(name)
elif self.has_model(name): return self.get_model(name)
else: raise ValueError("Doesn't have observation or model for name '" + name + "'")
# ------------------------------------------------------------------------------
def get_filter(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).filter
# ------------------------------------------------------------------------------
def get_wcs(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).wcs
# ------------------------------------------------------------------------------
def calculate_residuals(self, name):
"""
This function calculates the residual frame between the model and the observation of an image.
:param name: name of the image
:return: the residual frame
"""
# Get the frames
#observation = self.observations[name]
#model = self.models[name]
# Uniformize
observation, model = uniformize(self.observations[name], self.models[name])
# Error-weighed residuals
if self.config.weighed:
if self.config.weighing_reference == observation_name:
if not self.has_errors(name): raise ValueError("No errors for the '" + name + "' image")
errors = self.get_errors(name)
elif self.config.weighing_reference == model_name:
if not self.has_model_errors(name): raise ValueError("No model errors for the '" + name + "' image")
errors = self.get_model_errors(name)
else: raise ValueError("Invalid value for 'weighing_reference'")
# Calculate
res = Frame((model - observation) / errors, wcs=observation.wcs)
# Relative residuals
elif self.config.relative: res = Frame((model - observation) / observation, wcs=observation.wcs)
# Absolute residuals
else: res = Frame(model - observation, wcs=observation.wcs)
# Take absolute values?
if self.config.absolute: res = res.absolute
# Return the residual
return res
# ------------------------------------------------------------------------------
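# Worked example of the residual definitions above (hypothetical values):
#   observation = 2.0, model = 2.5, error = 0.25
#   absolute residual : model - observation                  = 0.5
#   relative residual : (model - observation) / observation  = 0.25  (i.e. 25%)
#   weighed residual  : (model - observation) / error        = 2.0   (in units of sigma)
# ------------------------------------------------------------------------------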
def create_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual frames ...")
# Loop over the observed images
for name in self.names:
# Checks
if not self.has_model(name): continue
if self.has_residuals(name): continue
# Debugging
log.debug("Creating residual frame for the '" + name + "' image ...")
# Create
res = self.calculate_residuals(name)
# Add the residuals frame
self.residuals[name] = res
# ------------------------------------------------------------------------------
def create_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual distributions ...")
# Loop over the residual maps
for name in self.residuals_names:
# Checks
if self.has_distribution(name): continue
# Debugging
log.debug("Creating distribution for the '" + name + "' residuals ...")
# Get the residual map
residuals = self.get_residuals(name)
# Create the distribution
distribution = Distribution.from_data("Residual", residuals, sigma_clip=self.config.sigma_clip_distributions, sigma_level=self.config.sigma_clip_level)
# Add the distribution
self.distributions[name] = distribution
# ------------------------------------------------------------------------------
def get_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return self.observations[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_observation_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add observation frame
image.add_frame(self.get_observation(name), observation_name)
# Add error map
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_model(self, name):
"""
This function ...
:param name:
:return:
"""
return self.models[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_model_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add model frame
image.add_frame(self.get_model(name), model_name)
# Add error map
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.errors[name]
# ------------------------------------------------------------------------------
def get_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.model_errors[name]
# ------------------------------------------------------------------------------
def get_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return self.residuals[name]
# ------------------------------------------------------------------------------
def get_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return self.distributions[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create the image
image = Image(name=name)
# Add the observation
if self.has_observation(name): image.add_frame(self.get_observation(name), observation_name)
# Add the model
if self.has_model(name): image.add_frame(self.get_model(name), model_name)
# Add the errors
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Add the model errors
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), model_errors_name)
# Add the residuals
if self.has_residuals(name): image.add_frame(self.get_residuals(name), residuals_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return self.settings[name]
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write observations
if self.config.write_observations: self.write_observations()
# Write models
if self.config.write_models: self.write_models()
# Write residual frames
if self.config.write_residuals: self.write_residuals()
# Write the images
if self.config.write_images: self.write_images()
# Write the distributions
if self.config.write_distributions: self.write_distributions()
# Write the settings
if self.config.write_settings: self.write_settings()
# ------------------------------------------------------------------------------
@lazyproperty
def images_path(self):
return self.output_path_directory(images_name)
# ------------------------------------------------------------------------------
@lazyproperty
def observations_path(self):
return self.output_path_directory(observations_name)
# ------------------------------------------------------------------------------
@lazyproperty
def models_path(self):
return self.output_path_directory(models_name)
# ------------------------------------------------------------------------------
@lazyproperty
def residuals_path(self):
return self.output_path_directory(residuals_name)
# ------------------------------------------------------------------------------
@lazyproperty
def distributions_path(self):
return self.output_path_directory(distributions_name)
# ------------------------------------------------------------------------------
@lazyproperty
def settings_path(self):
return self.output_path_directory(settings_name)
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over all images
for name in self.all_names:
# Determine path
path = fs.join(self.images_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' image ...")
# Get image
image = self.get_image(name)
# Save the image
image.saveto(path)
# ------------------------------------------------------------------------------
def write_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the observed frames ...")
# Loop over the observed images
for name in self.observation_names:
# Determine the path
path = fs.join(self.observations_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' observed image ...")
# Get the frame
frame = self.get_observation_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the model frames ...")
# Loop over the model images
for name in self.model_names:
# Determine the path
path = fs.join(self.models_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' model image ...")
# Get the frame
frame = self.get_model_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual frames ...")
# Loop over the residual maps
for name in self.residuals_names:
# Determine the path
path = fs.join(self.residuals_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual frame ...")
# Get the residual map
frame = self.get_residuals(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual distributions ...")
# Loop over the distributions
for name in self.distribution_names:
# Determine the path
path = fs.join(self.distributions_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual distribution ...")
# Get the distribution
distribution = self.get_distribution(name)
# Save
distribution.saveto(path)
# ------------------------------------------------------------------------------
def write_settings(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the plotting settings ...")
# Loop over the settings
for name in self.settings_names:
# Determine the path
path = fs.join(self.settings_path, name + ".dat")
# Debugging
log.debug("Writing the '" + name + "' plotting settings ...")
# Get the settings
settings = self.get_settings(name)
# Save
settings.saveto(path)
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot observations
self.plot_observations()
# Plot models
self.plot_models()
# Plot residuals
self.plot_residuals()
# Plot distributions
if self.config.distributions: self.plot_distributions()
# Finish the plot
self.finish()
# ------------------------------------------------------------------------------
def get_label(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return name
# Get the settings
settings = self.get_settings(name)
# Return
if settings.label is not None: return settings.label
else: return name
# ------------------------------------------------------------------------------
def get_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return self.config.cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.cmap is not None: return settings.cmap
else: return self.config.cmap
# ------------------------------------------------------------------------------
@property
def config_residual_cmap(self):
"""
This function ...
:return:
"""
if self.config.absolute: return self.config.absolute_residual_cmap
else: return self.config.residual_cmap
# ------------------------------------------------------------------------------
def get_residual_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config_residual_cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.residual_cmap is not None: return settings.residual_cmap
else: return self.config_residual_cmap
# ------------------------------------------------------------------------------
def get_limits(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.vmin, self.config.vmax, False, False
# Get the settings
settings = self.get_settings(name)
# Get limits
vmin = settings.vmin if settings.vmin is not None else self.config.vmin
vmax = settings.vmax if settings.vmax is not None else self.config.vmax
# Get flags
soft_vmin = settings.soft_vmin if settings.vmin is not None else False # don't use True flag if vmin is not set in settings
soft_vmax = settings.soft_vmax if settings.vmax is not None else False # don't use True flag if vmax is not set in settings
# Return
return vmin, vmax, soft_vmin, soft_vmax
# ------------------------------------------------------------------------------
def get_residual_amplitude(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.residual_amplitude, False
# Get the settings
settings = self.get_settings(name)
# Get amplitude
amplitude = settings.residual_amplitude if settings.residual_amplitude is not None else self.config.residual_amplitude
# Get flag
soft_amplitude = settings.soft_residual_amplitude if settings.residual_amplitude is not None else False # don't use True flag if amplitude is not set in settings
# Return
return amplitude, soft_amplitude
# ------------------------------------------------------------------------------
def set_limits(self, name, vmin, vmax, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param name:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Set vmin and vmax
self.add_settings(name, vmin=vmin, vmax=vmax)
# Set flags
if soft_vmin is not None: self.set_setting(name, "soft_vmin", soft_vmin)
if soft_vmax is not None: self.set_setting(name, "soft_vmax", soft_vmax)
# ------------------------------------------------------------------------------
def get_vmin_vmax(self, frame, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param frame:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Defined?
has_vmin = vmin is not None
has_vmax = vmax is not None
# Vmin and vmax don't have to be calculated
if has_vmin and has_vmax and (not soft_vmin) and (not soft_vmax): return vmin, vmax
# Calculate vmin and or vmax
return get_vmin_vmax(frame.data, interval=self.config.interval, zmin=vmin, zmax=vmax, soft_zmin=soft_vmin, soft_zmax=soft_vmax)
# ------------------------------------------------------------------------------
def get_residual_vmin_vmax(self, frame, amplitude=None, soft_amplitude=False):
"""
This function ...
:param frame:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Defined?
if amplitude is not None and not soft_amplitude:
if self.config.absolute: return 0., amplitude
else: return -amplitude, amplitude
# Calculate vmin and or vmax
if self.config.absolute: return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=0, zmax=amplitude, soft_zmin=False, soft_zmax=soft_amplitude)
else:
zmin = -amplitude if amplitude is not None else None
zmax = amplitude
return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=zmin, zmax=zmax, soft_zmin=soft_amplitude, soft_zmax=soft_amplitude, around_zero=True, symmetric=True)
# ------------------------------------------------------------------------------
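# Example of the residual interval logic above (hypothetical amplitude of 0.5, i.e. 50%):
#   absolute residuals -> (vmin, vmax) = (0.0, 0.5)
#   signed residuals   -> (vmin, vmax) = (-0.5, 0.5), symmetric around zero
# When the amplitude is 'soft' or not given, the limits are derived from the frame data instead.
# ------------------------------------------------------------------------------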
def get_observation_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 0
if self.horizontal: return 0, index
# Vertical
#elif self.vertical: return 0, index
elif self.vertical: return index, 0
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_model_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 1
if self.horizontal: return 1, index
# Vertical
#elif self.vertical: return 1, index
elif self.vertical: return index, 1
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_residuals_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 2
if self.horizontal: return 2, index
# Vertical
#elif self.vertical: return 2, index
elif self.vertical: return index, 2
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_distribution_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 3
if self.horizontal: return 3, index
# Vertical
#elif self.vertical: return 3, index
elif self.vertical: return index, 3
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
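# Panel layout implied by the four helpers above (index = image index):
#   horizontal -> observation (0, index), model (1, index), residuals (2, index), distribution (3, index)
#   vertical   -> observation (index, 0), model (index, 1), residuals (index, 2), distribution (index, 3)
# ------------------------------------------------------------------------------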
def get_observation_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_observation_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_model_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_model_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_residuals_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_residuals_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_distribution_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_distribution_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def create_observation_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_observation_spec(index, return_row_col=True)
#print(spec)
#print("ROW", row, "COL", col)
# Get coordinates of the subplot
#points = spec.get_position(self.figure.figure).get_points()
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
# needs [xmin, ymin, dx, dy]
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_model_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_model_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_residuals_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_residuals_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def _plot_observation(self, index, frame, cmap, label=None, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param label:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_observation_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# FOR FIRST
#f1._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#f1._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# Tick settings
plot._ax2.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax2.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# Set image background color
plot.set_nan_color(self.nan_color)
# FOR FIRST
#f1._ax1.scatter(ra, dec, marker='.', label='Observation')
# FOR FIRST
#legend1 = f1._ax1.legend(loc='upper right', fontsize=12, fancybox=True, framealpha=0, numpoints=None)
#plt.setp(legend1.get_texts(), color=config.text_color_in)
# Set title
if label is not None: plot._ax1.set_title(label, fontsize=self.config.label_fontsize)
# Return the vmin and vmax
return vmin, vmax
# ------------------------------------------------------------------------------
def _plot_model(self, index, frame, cmap, vmin=None, vmax=None, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_model_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
#f6._ax1.scatter(ra, dec, marker='.', label='Model')
#legend6 = f6._ax1.legend(loc='upper right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend6.get_texts(), color=config.text_color_in)
# Set image background color
plot.set_nan_color(self.nan_color)
# ------------------------------------------------------------------------------
def _plot_residuals(self, index, frame, cmap, amplitude=None, soft_amplitude=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Create the plot
plot = self.create_residuals_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_residual_vmin_vmax(frame, amplitude=amplitude, soft_amplitude=soft_amplitude)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
# f11._ax1.scatter(ra, dec, marker='.', label='Relative \nResidual')
# FOR FIRST
# Set legend
#legend11 = f11._ax1.legend(loc='lower right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend11.get_texts(), color=config.text_color_in)
# Set background color
plot.set_nan_color(self.background_color)
# ------------------------------------------------------------------------------
def _plot_distribution(self, index, distribution):
"""
This function ...
:param index:
:param distribution:
:return:
"""
pass
# ------------------------------------------------------------------------------
def plot_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the observed image frames ...")
# Loop over the names
#print(self.names)
#print(self.nimages)
#print(len(self.names))
for index, name in enumerate(self.names):
# Debugging
log.debug("Plotting the observed frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the observation
frame = self.get_observation(name)
# Get the label for this image
label = self.get_label(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
vmin, vmax = self._plot_observation(index, frame, cmap, label=label, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set new vmin and vmax (for corresponding model)
self.set_limits(name, vmin, vmax, soft_vmin=False, soft_vmax=False)
# ------------------------------------------------------------------------------
def plot_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the model image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_model(name): continue
# Debugging
log.debug("Plotting the model frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the model
frame = self.get_model(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
self._plot_model(index, frame, cmap, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# ------------------------------------------------------------------------------
def plot_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_residuals(name): continue
# Debugging
log.debug("Plotting the residuals frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the residuals
frame = self.get_residuals(name)
# Get the colormap for this residual map
cmap = self.get_residual_colormap(name)
# Get the amplitude
amplitude, soft_amplitude = self.get_residual_amplitude(name)
# Plot
# index, frame, cmap, amplitude=None, soft_amplitude=False
self._plot_residuals(index, frame, cmap, amplitude=amplitude, soft_amplitude=soft_amplitude)
# ------------------------------------------------------------------------------
def plot_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual distributions ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_distribution(name): continue
# Debugging
log.debug("Plotting the residual distribution of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + " ) ...")
# Get the distribution
distribution = self.get_distribution(name)
# ------------------------------------------------------------------------------
def finish(self):
"""
This function ...
:return:
"""
# Draw
self.figure.draw()
# Save to file
if self.config.path is not None: self.figure.figure.savefig(self.config.path, dpi=self.config.dpi)
# Show
else: plt.show()
# Close
#plt.close(fig)
plt.close()
# ------------------------------------------------------------------------------
def plot_images_aplpy(frames, filepath=None, center=None, radius=None, xy_ratio=None, dark=False, scale="log",
colormap="inferno", nrows=None, ncols=None, orientation="horizontal", plotsize=3., distance=None,
share_scale=None, descriptions=None, minmax_scaling=0.5):
"""
This function plots a grid of frames with APLpy.
:param frames: the frames to plot (a sequence, or a dictionary of label -> frame)
:param filepath: path for saving the figure (the figure is shown when not given)
:param center: sky coordinate to center each panel on
:param radius: angular radius of each panel
:param xy_ratio: width-to-height ratio of the panels
:param dark: use the dark plotting theme
:param scale: stretch for the panels ('log', ...); a single string, a sequence, or a dictionary per label
:param colormap: name of the colormap
:param nrows: number of rows of panels
:param ncols: number of columns of panels
:param orientation: 'horizontal' or 'vertical'; used when nrows and ncols are not specified
:param plotsize: size of a single panel
:param distance: distance to the object
:param share_scale: dictionary mapping a label to the label with which it shares its scale and interval
:param descriptions: panel titles; a sequence or a dictionary per label
:param minmax_scaling: scaling factor for the automatic determination of vmin and vmax (default 0.5)
:return:
"""
import matplotlib.gridspec as gridspec
#from matplotlib.colorbar import ColorbarBase
#from matplotlib.colors import LinearSegmentedColormap
#from matplotlib.colors import Normalize
from pts.magic.tools import plotting
# Set
set_theme(dark=dark)
nimages = len(frames)
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
#print("plotsize", xsize, ysize)
# Determine the number of columns and rows
if nrows is None and ncols is None:
if orientation == "horizontal": ncols, nrows = nimages, 1
elif orientation == "vertical": ncols, nrows = 1, nimages
else: raise ValueError("Invalid orientation: '" + orientation + "'")
# Nrows is none but ncols is not
elif nrows is None: nrows = numbers.round_up_to_int(nimages/ncols)
# Ncols is none but nrows is not
elif ncols is None: ncols = numbers.round_up_to_int(nimages/nrows)
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figsize", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nrows, ncols) # nrows x ncols grid of panels
# gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Get frame labels
if types.is_dictionary(frames):
labels = frames.keys()
frames = frames.values()
else: labels = [frame.filter_name for frame in frames]
# Set scale for each image
scales = dict()
if types.is_string_type(scale):
for label in labels: scales[label] = scale
elif types.is_sequence(scale):
for label, scalei in zip(labels, scale): scales[label] = scalei
elif types.is_dictionary(scale): scales = scale
else: raise ValueError("Invalid type for 'scale'")
# Initialize dict for intervals
intervals = dict()
# Set descriptions
if descriptions is None:
descriptions = dict()
for label in labels: descriptions[label] = None
elif types.is_sequence(descriptions):
descrpts = descriptions
descriptions = dict()
for label, descr in zip(labels, descrpts): descriptions[label] = descr
elif types.is_dictionary(descriptions): pass # OK
else: raise ValueError("Invalid type for 'descriptions'")
# Set minmax scaling
if types.is_real_type(minmax_scaling):
factor = minmax_scaling
minmax_scaling = dict()
for label in labels: minmax_scaling[label] = factor
elif types.is_dictionary(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label in labels:
if label in minmax_scaling_orig: minmax_scaling[label] = minmax_scaling_orig[label]
else: minmax_scaling[label] = 0.5
elif types.is_sequence(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label, factor in zip(labels, minmax_scaling_orig): minmax_scaling[label] = factor
else: raise ValueError("Invalid type for 'minmax_scaling'")
# Loop over the frames
for label, frame, index in zip(labels, frames, range(nimages)):
rowi = index // ncols
coli = index % ncols
is_first_row = rowi == 0
is_last_row = rowi == nrows - 1
is_first_col = coli == 0
is_last_col = coli == ncols - 1
#print("row", rowi)
#print("col", coli)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the '" + label + "' image ...")
# Get HDU
hdu = frame.to_hdu()
# Get interval
if share_scale is not None and label in share_scale:
share_with = share_scale[label]
vmin, vmax = intervals[share_with]
scalei = scales[share_with]
else:
# Get scale
scalei = scales[label]
is_logscale = scalei == "log"
#print(label, minmax_scaling[label])
vmin, vmax = plotting.get_vmin_vmax(frame.data, logscale=is_logscale, minmax_scaling=minmax_scaling[label])
# Set interval
intervals[label] = (vmin, vmax,)
# Set title
if descriptions[label] is not None: title = descriptions[label]
else: title = label.replace("_", r"\_").replace("um", r"$\mu$m")
# Has sky coordinate system?
has_wcs = frame.has_wcs and frame.wcs.is_sky
# OBSERVATION
figi = aplpy.FITSFigure(hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(figi, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scalei, has_wcs=has_wcs)
set_ticks(figi, is_first_row, is_last_row)
# FIRST COLUMN
if is_first_col:
figi.tick_labels.show_y()
figi.axis_labels.show_y()
# LAST ROW
if is_last_row:
figi.tick_labels.show_x()
figi.axis_labels.show_x()
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset
reset_theme()
# ------------------------------------------------------------------------------
def plot_one_residual_aplpy(observation, model, residual=None, path=None, scale="log", plotsize=3., dark=False,
center=None, radius=None, xy_ratio=None, first_label="Observation", second_label="Model",
residual_label="Residual", filter_label=True):
"""
This function ...
:param observation:
:param model:
:param residual:
:param path:
:param scale:
:param plotsize:
:param dark:
:param center:
:param radius:
:param xy_ratio:
:param first_label:
:param second_label:
:param residual_label:
:param filter_label:
:return:
"""
# Make residual?
if residual is None: residual = (model - observation) / observation
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
import matplotlib.gridspec as gridspec
from pts.magic.tools import plotting
# Set theme
set_theme(dark=dark)
nrows = 1
ncols = 3
xsize = plotsize
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
# Create figure with appropriate size
#fig = plt.figure(figsize=(figxsize, figysize))
figure = MPLFigure(size=(figxsize,figysize))
# Create grid
    gs1 = gridspec.GridSpec(nrows, ncols) # 1 row, 3 columns: observation, model, residual
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Percentual residuals
residual = residual * 100.
# Set title
    if filter_label and observation.has_filter: title = str(observation.filter).replace("um", r" $\mu$m")
else: title = first_label
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
# Get interval
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale, has_wcs=observation.has_celestial_wcs)
set_ticks(fig1, True, True)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig1.tick_labels.show_x()
fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label=second_label, center=center, radius=radius, scale=scale, has_wcs=model.has_celestial_wcs)
set_ticks(fig2, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig2.tick_labels.show_x()
fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
    setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label=residual_label + r' (\%)', center=center, radius=radius, has_wcs=residual.has_celestial_wcs)
set_ticks(fig3, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig3.tick_labels.show_x()
fig3.axis_labels.show_x()
# Show or save
if path is None: figure.show()
else: figure.saveto(path)
# Reset theme
reset_theme()
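# Usage sketch (illustrative, kept as comments so the module is unchanged when imported).
# The import path and Frame.from_file loader are assumptions; any frame objects exposing
# to_hdu(), filter/has_filter and has_celestial_wcs, and supporting frame arithmetic, should
# work. File names are placeholders.
#
#   from pts.magic.core.frame import Frame   # assumed import path
#   observation = Frame.from_file("observation_250.fits")
#   model = Frame.from_file("model_250.fits")
#   plot_one_residual_aplpy(observation, model, path="residual_250.pdf", scale="log", dark=True)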
# ------------------------------------------------------------------------------
def plot_residuals_aplpy(observations, models, residuals, filepath=None, center=None, radius=None, xy_ratio=None,
dark=False, scale="log", plotsize=3., distance=None, mask_simulated=False, masks=None):
"""
This function ...
:param observations:
:param models:
:param residuals:
:param filepath:
:param center:
:param radius:
:param xy_ratio:
:param dark:
:param scale:
:param plotsize:
:param distance:
:param mask_simulated:
:param masks: if passed, both observations, models and residuals are masked
:return:
"""
import numpy as np
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import Normalize
import seaborn as sns
# Set theme
set_theme(dark=dark)
nimages = len(observations)
ncols = 4
nrows = nimages
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
# Set individual map plot size
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
#print("individual size", xsize, ysize)
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figure size", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nimages, 4) # nimages ROWS, 4 COLUMNS
#gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Loop over the filters
if masks is None: masks = [None] * nimages
for observation, model, residual, mask, index in zip(observations, models, residuals, masks, range(nimages)):
#print("units:")
#print(observation.unit)
#print(model.unit)
observation.convert_to("mJy/sr", distance=distance)
model.convert_to("mJy/sr", distance=distance)
# MASK MODEL
if mask_simulated:
model.rebin(observation.wcs)
model.apply_mask_nans(observation.nans)
# MASK ALL?
if mask is not None:
observation.apply_mask_nans(mask)
model.apply_mask_nans(mask)
residual.apply_mask_nans(mask)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the observation, model and residuals for the " + str(observation.filter) + " filter ...")
# Percentual residuals
residual = residual * 100.
# Set title
        title = str(observation.filter).replace("um", r" $\mu$m")
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
from pts.magic.tools import plotting
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
#vmax = 0.7 * vmax
#print("VMIN", vmin)
#print("VMAX", vmax)
# ------------------------------------------------------------------------------
# Plot obs, model and residual
# ------------------------------------------------------------------------------
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale)
set_ticks(fig1, is_first, is_last)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig1.tick_labels.show_x()
if is_last: fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label='Model', center=center, radius=radius, scale=scale)
set_ticks(fig2, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig2.tick_labels.show_x()
if is_last: fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
        setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label=r'Residual (\%)', center=center, radius=radius)
set_ticks(fig3, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig3.tick_labels.show_x()
if is_last: fig3.axis_labels.show_x()
# ------------------------------------------------------------------------------
# COLORBAR
colorbar_start_x = gs1[plot_idx].get_position(fig).bounds[0] + 0.025
colorbar_start_y = gs1[plot_idx].get_position(fig).bounds[1] + 0.085 / (nimages)
colorbar_x_width = gs1[plot_idx].get_position(fig).bounds[2] - 0.05
colorbar_y_height = gs1[plot_idx].get_position(fig).bounds[3]
cb_ax = fig.add_axes([colorbar_start_x, colorbar_start_y, colorbar_x_width, (0.02 + 0.002) / (nimages + 1)])
# Colourbar
cb = ColorbarBase(cb_ax, cmap=residual_colormap, norm=Normalize(vmin=-100, vmax=100), orientation='horizontal')
cb.ax.xaxis.set_ticks_position('bottom')
cb.ax.xaxis.set_label_position('bottom')
cb.ax.zorder = 99
cb.ax.xaxis.set_tick_params(color='white')
cb.outline.set_edgecolor('white')
plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='white')
plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color='white')
cb.set_ticks([-100, -50, 0, 50, 100])
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# KDE Plot of residuals
residual = residual_hdu.data
fig4 = plt.subplot(gs1[plot_idx])
residuals_to_kde = np.where((residual <= 200) & (residual >= -200))
if dark:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='white', shade=True)
fig4.axes.set_facecolor("black")
else:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='k', shade=True)
fig4.axes.set_facecolor("white")
        fig4.tick_params(labelleft=False)
plt.xlim([-150, 150])
fig4.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=True, left=False)
# Hide tick labels except for the last (bottom) plot
if not is_last: fig4.tick_params(labelbottom=False)
if dark: plt.axvline(0, c='white', ls='--', lw=2)
else: plt.axvline(0, c='k', ls='--', lw=2)
# Label for kde
        plt.xlabel(r'Residual (\%)')
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset theme
reset_theme()
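# Usage sketch (illustrative, kept as comments). Residual frames can be built the same way
# plot_one_residual_aplpy does it, i.e. (model - observation) / observation; the frame lists
# and the distance value below are placeholders.
#
#   observations = [obs_250, obs_350, obs_500]                  # one frame per filter
#   models = [mod_250, mod_350, mod_500]
#   residuals = [(m - o) / o for o, m in zip(observations, models)]
#   plot_residuals_aplpy(observations, models, residuals, filepath="residuals.pdf",
#                        distance=distance, scale="log", dark=False)  # distance: e.g. an astropy Quantity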
# ------------------------------------------------------------------------------
def setup_map_plot(figure, colormap, vmin, vmax, label, smooth=None,text_x=0.05, text_y=0.95, center=None,
radius=None, scale="linear", has_wcs=True):
"""
This function ...
:param figure:
:param colormap:
:param vmin:
:param vmax:
:param label:
:param smooth:
:param text_x:
:param text_y:
:param center:
:param radius:
:param scale:
:param has_wcs:
:return:
"""
figure.show_colorscale(cmap=colormap, vmin=vmin, vmax=vmax, smooth=smooth, stretch=scale)
#figure.set_tick_labels_format(xformat='hh:mm:ss',yformat='dd:mm:ss')
if has_wcs:
figure.tick_labels.set_xformat('hh:mm:ss')
figure.tick_labels.set_yformat('dd:mm:ss')
figure._ax1.set_facecolor('black')
figure.set_nan_color('black')
# RECENTER
if center is not None:
if radius is None: raise ValueError("Cannot specify center without radius")
if has_wcs: figure.recenter(center.ra.to("deg").value, center.dec.to("deg").value, radius=radius.to("deg").value)
else: figure.recenter(center.x, center.y, radius=radius)
# Hide axes labels and tick labels by default (enable for y for first column and for x for last row)
figure.axis_labels.hide()
figure.tick_labels.hide()
# Axes spines
figure._ax1.spines['bottom'].set_color('white')
figure._ax1.spines['top'].set_color('white')
figure._ax1.spines["left"].set_color("white")
figure._ax1.spines["right"].set_color("white")
# TICKS
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# SET LABEL
figure.add_label(text_x, text_y, r'' + str(label), relative=True, size=13, weight='bold', color='white',
horizontalalignment='left', verticalalignment='top',
bbox=dict(facecolor='black', edgecolor='none', alpha=0.5))
# ------------------------------------------------------------------------------
def set_ticks(figure, is_first_row, is_last_row):
"""
This function ...
:param figure:
:param is_first_row:
:param is_last_row:
:return:
"""
# ONLY ROW?
is_only_row = is_first_row and is_last_row
# ONLY
if is_only_row:
# IN EVERYWHERE
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# FIRST
elif is_first_row:
# LEFT, RIGHT AND TOP
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=False, left=True)
# LAST
elif is_last_row:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# BOTTOM, LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# In between
else:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=False, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=False, right=True, bottom=False, left=True)
# ------------------------------------------------------------------------------
def set_theme(dark=False):
"""
This function ...
:param dark:
:return:
"""
# General settings
plt.rcParams["axes.labelsize"] = 14 # 16 #default 20
plt.rcParams["xtick.labelsize"] = 8 # 10 #default 16
plt.rcParams["ytick.labelsize"] = 8 # 10 #default 16
plt.rcParams["legend.fontsize"] = 14 # 10 #default 14
plt.rcParams["legend.markerscale"] = 0
plt.rcParams["lines.markersize"] = 2.5 # 4 #default 4
plt.rcParams["axes.linewidth"] = 1
# Colors
if dark:
plt.rcParams['axes.facecolor'] = 'black'
plt.rcParams['savefig.facecolor'] = 'black'
plt.rcParams['axes.edgecolor'] = 'white'
plt.rcParams['xtick.color'] = 'white'
plt.rcParams['ytick.color'] = 'white'
plt.rcParams["axes.labelcolor"] = 'white'
plt.rcParams["text.color"] = 'white'
else:
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['savefig.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'black'
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams["axes.labelcolor"] = 'black'
plt.rcParams["text.color"] = 'black'
# ------------------------------------------------------------------------------
def reset_theme():
"""
This function ...
:return:
"""
# Back to original settings
plt.rcParams.update(plt.rcParamsDefault)
# ------------------------------------------------------------------------------
| agpl-3.0 |
glamp/coffe2py | main.py | 1 | 1282 | import sys
from IPython.core.interactiveshell import InteractiveShell
import pandasjson as json
import StringIO
if __name__=="__main__":
mode = "ipython"
line = sys.stdin.readline()
shell = InteractiveShell()
while line:
# explicitly write to stdout
sys.stdout.write(line)
sys.stdout.flush()
# handle incoming data, parse it, and redirect
# stdout so it doesn't interfere
line = sys.stdin.readline()
data = json.loads(line)
codeOut = StringIO.StringIO()
sys.stdout = codeOut
try:
code = data["code"]
if data.get("autocomplete")==True:
_, completions = shell.complete(code)
print json.dumps(completions)
elif code.startswith("print"):
#exec(code)
shell.ex(code)
else:
try:
#print repr(eval(code))
print repr(shell.ev(code))
except:
#exec(code)
shell.ex(code)
except Exception, e:
pass
sys.stdout = sys.__stdout__
data["result"] = codeOut.getvalue()
sys.stdout.write(json.dumps(data) + "\n")
sys.stdout.flush() | bsd-2-clause |
capntransit/carfree-council | cfcensus2010.py | 1 | 1828 | import sys, os, json, time
import pandas as pd
BOROCODE = {'61' : '1', '05' : '2', '47': '3', '81' : '4', '85': '5'}
if (len(sys.argv) < 2):
print ("Usage: cfcensus.py census.csv districts.json")
exit()
censusfile = sys.argv[1]
councilfile = sys.argv[2]
TRACTCOL = 'BoroCT' # rename this for 2000 census
def boroCT (id2):
boro = BOROCODE[str(id2)[3:5]]
tract = str(id2)[5:]
return boro + tract
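# Example (illustrative): GEO.id2 36061018900 is state 36 (NY), county 061 (Manhattan,
# borough code '1') and tract 018900, so boroCT(36061018900) returns '1' + '018900' = '1018900'.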
for (f) in ([censusfile, councilfile]):
if (not os.path.isfile(f)):
print ("File " + f + " is not readable")
exit()
try:
vehDf = pd.read_csv(
censusfile,
skiprows=[1]
)
except Exception as e:
print ("Unable to read census file " + censusfile + ": {0}".format(e))
exit()
try:
with open(councilfile) as councilfo:
councilData = json.load(councilfo)
except Exception as e:
print ("Unable to read council file " + councilfile+": {0}".format(e))
exit()
vehDf['pctNoVeh'] = vehDf['HD01_VD03'].astype('int') / vehDf['HD01_VD01'].astype('int')
vehDf[TRACTCOL] = vehDf['GEO.id2'].apply(boroCT)
vehDf2 = pd.DataFrame(vehDf[[TRACTCOL, 'HD01_VD01', 'HD01_VD03', 'pctNoVeh']]).set_index(TRACTCOL)
f = 0
total = {}
noVeh = {}
councilDistricts = set()
for (t, c) in councilData.items():
for (d) in c:
councilDistricts.add(d)
try:
total[d] = total.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD01']
noVeh[d] = noVeh.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD03']
except KeyError as e:
print("No entry for census tract " + str(t))
for (d) in sorted(councilDistricts, key=int):
print (','.join([
d,
str(int(total[d])),
str(int(noVeh[d])),
str(round((noVeh[d] / total[d]), 3))
]))
| gpl-3.0 |
Omegaphora/external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 35 | 11261 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
import detect_host_arch
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
# Mac 10.5 bots tend to use a particularlly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in detect_host_arch.HostArch():
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
pkg_ver_dir = os.path.join(nacl_dir, 'build', 'package_version')
RunCommand([python, os.path.join(pkg_ver_dir, 'package_version.py'),
'--exclude', 'arm_trusted',
'--exclude', 'pnacl_newlib',
'--exclude', 'nacl_arm_newlib',
'sync', '--extract'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
CartoDB/cartoframes | cartoframes/io/managers/context_manager.py | 1 | 22518 | import time
import pandas as pd
from warnings import warn
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.exceptions import CartoException, CartoRateLimitException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
from pyrestcli.exceptions import NotFoundException
from ..dataset_info import DatasetInfo
from ... import __version__
from ...auth.defaults import get_default_credentials
from ...utils.logger import log
from ...utils.geom_utils import encode_geometry_ewkb
from ...utils.utils import (is_sql_query, check_credentials, encode_row, map_geom_type, PG_NULL, double_quote,
create_tmp_name)
from ...utils.columns import (get_dataframe_columns_info, get_query_columns_info, obtain_converters, date_columns_names,
normalize_name)
DEFAULT_RETRY_TIMES = 3
BATCH_API_PAYLOAD_THRESHOLD = 12000
def retry_copy(func):
def wrapper(*args, **kwargs):
m_retry_times = kwargs.get('retry_times', DEFAULT_RETRY_TIMES)
while m_retry_times >= 1:
try:
return func(*args, **kwargs)
except CartoRateLimitException as err:
m_retry_times -= 1
if m_retry_times <= 0:
warn(('Read call was rate-limited. '
'This usually happens when there are multiple queries being read at the same time.'))
raise err
warn('Read call rate limited. Waiting {s} seconds'.format(s=err.retry_after))
time.sleep(err.retry_after)
warn('Retrying...')
return func(*args, **kwargs)
return wrapper
def not_found(func):
def decorator_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except CartoException as e:
if hasattr(e, 'args') and isinstance(e.args, (list, tuple)) and type(e.args[0]) == NotFoundException:
raise Exception('Resource not found') from None
else:
raise e
return decorator_func
class ContextManager:
def __init__(self, credentials):
self.credentials = credentials or get_default_credentials()
check_credentials(self.credentials)
self.auth_client = _create_auth_client(self.credentials)
self.sql_client = SQLClient(self.auth_client)
self.copy_client = CopySQLClient(self.auth_client)
self.batch_sql_client = BatchSQLClient(self.auth_client)
@not_found
def execute_query(self, query, parse_json=True, do_post=True, format=None, **request_args):
return self.sql_client.send(query.strip(), parse_json, do_post, format, **request_args)
@not_found
def execute_long_running_query(self, query):
return self.batch_sql_client.create_and_wait_for_completion(query.strip())
def copy_to(self, source, schema=None, limit=None, retry_times=DEFAULT_RETRY_TIMES):
query = self.compute_query(source, schema)
columns = self._get_query_columns_info(query)
copy_query = self._get_copy_query(query, columns, limit)
return self._copy_to(copy_query, columns, retry_times)
def copy_from(self, gdf, table_name, if_exists='fail', cartodbfy=True,
retry_times=DEFAULT_RETRY_TIMES):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
df_columns = get_dataframe_columns_info(gdf)
if self.has_table(table_name, schema):
if if_exists == 'replace':
table_query = self._compute_query_from_table(table_name, schema)
table_columns = self._get_query_columns_info(table_query)
if self._compare_columns(df_columns, table_columns):
# Equal columns: truncate table
self._truncate_table(table_name, schema)
else:
# Diff columns: truncate table and drop + add columns
self._truncate_and_drop_add_columns(
table_name, schema, df_columns, table_columns)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
cartodbfy = False
else:
self._create_table_from_columns(table_name, schema, df_columns)
self._copy_from(gdf, table_name, df_columns, retry_times)
if cartodbfy is True:
cartodbfy_query = _cartodbfy_query(table_name, schema)
self.execute_long_running_query(cartodbfy_query)
return table_name
def create_table_from_query(self, query, table_name, if_exists):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
if self.has_table(table_name, schema):
if if_exists == 'replace':
# TODO: review logic copy_from
self._drop_create_table_from_query(table_name, schema, query)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
pass
else:
self._drop_create_table_from_query(table_name, schema, query)
return table_name
def list_tables(self, schema=None):
datasets = DatasetManager(self.auth_client).filter(
show_table_size_and_row_count='false',
show_table='false',
show_stats='false',
show_likes='false',
show_liked='false',
show_permission='false',
show_uses_builder_features='false',
show_synchronization='false',
load_totals='false'
)
datasets.sort(key=lambda x: x.updated_at, reverse=True)
return pd.DataFrame([dataset.name for dataset in datasets], columns=['tables'])
def has_table(self, table_name, schema=None):
query = self.compute_query(table_name, schema)
return self._check_exists(query)
def delete_table(self, table_name):
query = _drop_table_query(table_name)
output = self.execute_query(query)
return not('notices' in output and 'does not exist' in output['notices'][0])
def _delete_function(self, function_name):
query = _drop_function_query(function_name)
self.execute_query(query)
return function_name
def _create_function(self, schema, statement,
function_name=None, columns_types=None, return_value='VOID', language='plpgsql'):
function_name = function_name or create_tmp_name(base='tmp_func')
safe_schema = double_quote(schema)
query, qualified_func_name = _create_function_query(
schema=safe_schema,
function_name=function_name,
statement=statement,
columns_types=columns_types or '',
return_value=return_value,
language=language)
self.execute_query(query)
return qualified_func_name
def rename_table(self, table_name, new_table_name, if_exists='fail'):
new_table_name = self.normalize_table_name(new_table_name)
if table_name == new_table_name:
raise ValueError('Table names are equal. Please choose a different table name.')
if not self.has_table(table_name):
raise Exception('Table "{table_name}" does not exist in your CARTO account.'.format(
table_name=table_name))
if self.has_table(new_table_name):
if if_exists == 'replace':
log.debug('Removing table "{}"'.format(new_table_name))
self.delete_table(new_table_name)
elif if_exists == 'fail':
raise Exception('Table "{new_table_name}" already exists in your CARTO account. '
'Please choose a different `new_table_name` or use '
'if_exists="replace" to overwrite it.'.format(
new_table_name=new_table_name))
self._rename_table(table_name, new_table_name)
return new_table_name
def update_privacy_table(self, table_name, privacy=None):
DatasetInfo(self.auth_client, table_name).update_privacy(privacy)
def get_privacy(self, table_name):
return DatasetInfo(self.auth_client, table_name).privacy
def get_schema(self):
"""Get user schema from current credentials"""
query = 'SELECT current_schema()'
result = self.execute_query(query, do_post=False)
schema = result['rows'][0]['current_schema']
log.debug('schema: {}'.format(schema))
return schema
def get_geom_type(self, query):
"""Fetch geom type of a remote table or query"""
distict_query = '''
SELECT distinct ST_GeometryType(the_geom) AS geom_type
FROM ({}) q
LIMIT 5
'''.format(query)
response = self.execute_query(distict_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
st_geom_type = response.get('rows')[0].get('geom_type')
if st_geom_type:
return map_geom_type(st_geom_type[3:])
return None
def get_num_rows(self, query):
"""Get the number of rows in the query"""
result = self.execute_query('SELECT COUNT(*) FROM ({query}) _query'.format(query=query))
return result.get('rows')[0].get('count')
def get_bounds(self, query):
extent_query = '''
SELECT ARRAY[
ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
] bounds FROM (
SELECT ST_Extent(the_geom) geom_env
FROM ({}) q
) q;
'''.format(query)
response = self.execute_query(extent_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
return response.get('rows')[0].get('bounds')
return None
def get_column_names(self, source, schema=None, exclude=None):
query = self.compute_query(source, schema)
columns = [c.name for c in self._get_query_columns_info(query)]
if exclude and isinstance(exclude, list):
columns = list(set(columns) - set(exclude))
return columns
def is_public(self, query):
# Used to detect public tables in queries in the publication,
# because privacy only works for tables.
public_auth_client = _create_auth_client(self.credentials, public=True)
public_sql_client = SQLClient(public_auth_client)
exists_query = 'EXPLAIN {}'.format(query)
try:
public_sql_client.send(exists_query, do_post=False)
return True
except CartoException:
return False
def get_table_names(self, query):
# Used to detect tables in queries in the publication.
query = 'SELECT CDB_QueryTablesText($q${}$q$) as tables'.format(query)
result = self.execute_query(query)
tables = []
if result['total_rows'] > 0 and result['rows'][0]['tables']:
# Dataset_info only works with tables without schema
tables = [table.split('.')[1] if '.' in table else table for table in result['rows'][0]['tables']]
return tables
def _compare_columns(self, a, b):
a_copy = [i for i in a if _not_reserved(i.name)]
b_copy = [i for i in b if _not_reserved(i.name)]
a_copy.sort()
b_copy.sort()
return a_copy == b_copy
def _drop_create_table_from_query(self, table_name, schema, query):
log.debug('DROP + CREATE table "{}"'.format(table_name))
query = 'BEGIN; {drop}; {create}; COMMIT;'.format(
drop=_drop_table_query(table_name),
create=_create_table_from_query_query(table_name, query))
self.execute_long_running_query(query)
def _create_table_from_columns(self, table_name, schema, columns):
log.debug('CREATE table "{}"'.format(table_name))
query = 'BEGIN; {create}; COMMIT;'.format(
create=_create_table_from_columns_query(table_name, columns))
self.execute_query(query)
def _truncate_table(self, table_name, schema):
log.debug('TRUNCATE table "{}"'.format(table_name))
query = 'BEGIN; {truncate}; COMMIT;'.format(
truncate=_truncate_table_query(table_name))
self.execute_query(query)
def _truncate_and_drop_add_columns(self, table_name, schema, df_columns, table_columns):
log.debug('TRUNCATE AND DROP + ADD columns table "{}"'.format(table_name))
drop_columns = _drop_columns_query(table_name, table_columns)
add_columns = _add_columns_query(table_name, df_columns)
drop_add_columns = 'ALTER TABLE {table_name} {drop_columns},{add_columns};'.format(
table_name=table_name, drop_columns=drop_columns, add_columns=add_columns)
query = '{regenerate}; BEGIN; {truncate}; {drop_add_columns}; COMMIT;'.format(
regenerate=_regenerate_table_query(table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_columns=drop_add_columns)
query_length_over_threshold = len(query) > BATCH_API_PAYLOAD_THRESHOLD
if query_length_over_threshold:
qualified_func_name = self._create_function(
schema=schema, statement=drop_add_columns)
drop_add_func_sql = 'SELECT {}'.format(qualified_func_name)
query = '''
{regenerate};
BEGIN;
{truncate};
{drop_add_func_sql};
COMMIT;'''.format(
regenerate=_regenerate_table_query(
table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_func_sql=drop_add_func_sql)
try:
self.execute_long_running_query(query)
finally:
if query_length_over_threshold:
self._delete_function(qualified_func_name)
def compute_query(self, source, schema=None):
if is_sql_query(source):
return source
schema = schema or self.get_schema()
return self._compute_query_from_table(source, schema)
def _compute_query_from_table(self, table_name, schema):
return 'SELECT * FROM "{schema}"."{table_name}"'.format(
schema=schema or 'public',
table_name=table_name
)
def _check_exists(self, query):
exists_query = 'EXPLAIN {}'.format(query)
try:
self.execute_query(exists_query, do_post=False)
return True
except CartoException:
return False
def _check_regenerate_table_exists(self):
query = '''
SELECT 1
FROM pg_catalog.pg_proc p
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE p.proname = 'cdb_regeneratetable' AND n.nspname = 'cartodb';
'''
result = self.execute_query(query)
return len(result['rows']) > 0
def _get_query_columns_info(self, query):
query = 'SELECT * FROM ({}) _q LIMIT 0'.format(query)
table_info = self.execute_query(query)
return get_query_columns_info(table_info['fields'])
def _get_copy_query(self, query, columns, limit):
query_columns = [
double_quote(column.name) for column in columns
if (column.name != 'the_geom_webmercator')
]
query = 'SELECT {columns} FROM ({query}) _q'.format(
query=query,
columns=','.join(query_columns))
if limit is not None:
if isinstance(limit, int) and (limit >= 0):
query += ' LIMIT {limit}'.format(limit=limit)
else:
                raise ValueError("`limit` parameter must be an integer >= 0")
return query
@retry_copy
def _copy_to(self, query, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY TO')
copy_query = "COPY ({0}) TO stdout WITH (FORMAT csv, HEADER true, NULL '{1}')".format(query, PG_NULL)
raw_result = self.copy_client.copyto_stream(copy_query)
converters = obtain_converters(columns)
parse_dates = date_columns_names(columns)
df = pd.read_csv(
raw_result,
converters=converters,
parse_dates=parse_dates)
return df
@retry_copy
def _copy_from(self, dataframe, table_name, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY FROM')
query = """
COPY {table_name}({columns}) FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '{null}');
""".format(
table_name=table_name, null=PG_NULL,
columns=','.join(double_quote(column.dbname) for column in columns)).strip()
data = _compute_copy_data(dataframe, columns)
self.copy_client.copyfrom(query, data)
def _rename_table(self, table_name, new_table_name):
query = _rename_table_query(table_name, new_table_name)
self.execute_query(query)
def normalize_table_name(self, table_name):
norm_table_name = normalize_name(table_name)
if norm_table_name != table_name:
log.debug('Table name normalized: "{}"'.format(norm_table_name))
return norm_table_name
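# Usage sketch (illustrative, kept as comments). `Credentials` is assumed to be
# cartoframes.auth.Credentials and `gdf` a GeoDataFrame; names and keys are placeholders.
#
#   from cartoframes.auth import Credentials
#   manager = ContextManager(Credentials(username='my_user', api_key='my_api_key'))
#   table = manager.copy_from(gdf, 'my_table', if_exists='replace')   # upload
#   df = manager.copy_to(table, limit=100)                            # download first 100 rows
#   manager.rename_table(table, 'my_table_final', if_exists='replace')
#   manager.delete_table('my_table_final')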
def _drop_table_query(table_name, if_exists=True):
return 'DROP TABLE {if_exists} {table_name}'.format(
table_name=table_name,
if_exists='IF EXISTS' if if_exists else '')
def _drop_function_query(function_name, columns_types=None, if_exists=True):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns)
return 'DROP FUNCTION {if_exists} {function_name}{columns_str_call}'.format(
function_name=function_name,
if_exists='IF EXISTS' if if_exists else '',
columns_str_call='({columns_str})'.format(columns_str=columns_str) if columns else '')
def _truncate_table_query(table_name):
return 'TRUNCATE TABLE {table_name}'.format(
table_name=table_name)
def _create_function_query(schema, function_name, statement, columns_types, return_value, language):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns) if columns else ''
function_query = '''
CREATE FUNCTION {schema}.{function_name}({columns_str})
RETURNS {return_value} AS $$
BEGIN
{statement}
END;
$$ LANGUAGE {language}
'''.format(schema=schema,
function_name=function_name,
statement=statement,
columns_str=columns_str,
return_value=return_value,
language=language)
qualified_func_name = '{schema}.{function_name}({columns_str})'.format(
schema=schema, function_name=function_name, columns_str=columns_str)
return function_query, qualified_func_name
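# For illustration: with schema='"public"', function_name='tmp_func_x', no columns,
# statement='ALTER TABLE t DROP COLUMN a;', return_value='VOID' and language='plpgsql',
# the generated query is (up to whitespace):
#
#   CREATE FUNCTION "public".tmp_func_x()
#   RETURNS VOID AS $$
#   BEGIN
#       ALTER TABLE t DROP COLUMN a;
#   END;
#   $$ LANGUAGE plpgsql
#
# and the returned qualified name is '"public".tmp_func_x()', which _delete_function()
# later passes to DROP FUNCTION.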
def _drop_columns_query(table_name, columns):
columns = ['DROP COLUMN {name}'.format(name=double_quote(c.dbname))
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _add_columns_query(table_name, columns):
columns = ['ADD COLUMN {name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype)
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _not_reserved(column):
RESERVED_COLUMNS = ['cartodb_id', 'the_geom', 'the_geom_webmercator']
return column not in RESERVED_COLUMNS
def _create_table_from_columns_query(table_name, columns):
columns = ['{name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype) for c in columns]
return 'CREATE TABLE {table_name} ({columns})'.format(
table_name=table_name,
columns=','.join(columns))
def _create_table_from_query_query(table_name, query):
return 'CREATE TABLE {table_name} AS ({query})'.format(table_name=table_name, query=query)
def _cartodbfy_query(table_name, schema):
return "SELECT CDB_CartodbfyTable('{schema}', '{table_name}')".format(
schema=schema, table_name=table_name)
def _regenerate_table_query(table_name, schema):
return "SELECT CDB_RegenerateTable('{schema}.{table_name}'::regclass)".format(
schema=schema, table_name=table_name)
def _rename_table_query(table_name, new_table_name):
return 'ALTER TABLE {table_name} RENAME TO {new_table_name};'.format(
table_name=table_name, new_table_name=new_table_name)
def _create_auth_client(credentials, public=False):
return APIKeyAuthClient(
base_url=credentials.base_url,
api_key='default_public' if public else credentials.api_key,
session=credentials.session,
client_id='cartoframes_{}'.format(__version__),
user_agent='cartoframes_{}'.format(__version__))
def _compute_copy_data(df, columns):
for index in df.index:
row_data = []
for column in columns:
val = df.at[index, column.name]
if column.is_geom:
val = encode_geometry_ewkb(val)
row_data.append(encode_row(val))
csv_row = b'|'.join(row_data)
csv_row += b'\n'
yield csv_row
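# For illustration: each yielded row is a b'|'-delimited byte string ending in b'\n',
# matching the COPY ... WITH (FORMAT csv, DELIMITER '|') statement built in _copy_from.
# A row with a name, a number and a point geometry would look roughly like
#   b'Madrid|3.14|0101000020E6100000...\n'
# where the geometry has been encoded by encode_geometry_ewkb (hex EWKB here is an
# assumption) and the other values by encode_row.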
| bsd-3-clause |
hsuantien/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_collections.py | 2 | 21231 | """
Tests specific to the collections module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import numpy as np
from numpy.testing import (
assert_array_equal, assert_array_almost_equal, assert_equal)
import pytest
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import Collection, EventCollection
from matplotlib.testing.decorators import image_comparison
def generate_EventCollection_plot():
'''
generate the initial collection and plot it
'''
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig = plt.figure()
splt = fig.add_subplot(1, 1, 1)
splt.add_collection(coll)
splt.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
splt.set_xlim(-1, 22)
splt.set_ylim(0, 2)
return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
'''
check to make sure the default segments have the correct coordinates
'''
_, coll, props = generate_EventCollection_plot()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
def test__EventCollection__get_positions():
'''
check to make sure the default positions match the input positions
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['positions'], coll.get_positions())
def test__EventCollection__get_orientation():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['orientation'], coll.get_orientation())
def test__EventCollection__is_horizontal():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(True, coll.is_horizontal())
def test__EventCollection__get_linelength():
'''
check to make sure the default linelength matches the input linelength
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['linelength'], coll.get_linelength())
def test__EventCollection__get_lineoffset():
'''
check to make sure the default lineoffset matches the input lineoffset
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['lineoffset'], coll.get_lineoffset())
def test__EventCollection__get_linestyle():
'''
check to make sure the default linestyle matches the input linestyle
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(coll.get_linestyle(), [(None, None)])
def test__EventCollection__get_color():
'''
check to make sure the default color matches the input color
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['color'], coll.get_color())
check_allprop_array(coll.get_colors(), props['color'])
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
'''
check to make sure set_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
'''
check to make sure add_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.add_positions(props['extra_positions'][0])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
'''
check to make sure append_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
'''
check to make sure extend_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
'''
check to make sure switch_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(
baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
'''
check to make sure calling switch_orientation twice sets the
orientation back to the default
'''
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert_equal(props['orientation'], coll.get_orientation())
assert_equal(True, coll.is_horizontal())
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
'''
check to make sure set_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
'''
check to make sure set_linelength works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert_equal(new_linelength, coll.get_linelength())
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
'''
check to make sure set_lineoffset works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert_equal(new_lineoffset, coll.get_lineoffset())
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = 'dashed'
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
'''
check to make sure set_linestyle accepts a single dash pattern
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = (0, (6., 6.))
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
'''
check to make sure set_linewidth works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linewidth = 5
coll.set_linewidth(new_linewidth)
assert_equal(coll.get_linewidth(), new_linewidth)
splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
'''
check to make sure set_color works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
np.testing.assert_array_equal(new_color, coll.get_color())
check_allprop_array(coll.get_colors(), new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
'''
check to make sure all values in the segment are correct, given a
particular set of inputs
note: this is not a test, it is used by tests
'''
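# added note: for a horizontal collection the i-th segment is expected to run
# vertically from (positions[i], lineoffset + linelength / 2.) down to
# (positions[i], lineoffset - linelength / 2.); for a vertical collection the
# x and y roles are swapped, which is what the pos1/pos2 indices below encode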
segments = coll.get_segments()
if (orientation.lower() == 'horizontal'
or orientation.lower() == 'none' or orientation is None):
# if horizontal, the position in is in the y-axis
pos1 = 1
pos2 = 0
elif orientation.lower() == 'vertical':
# if vertical, the position in is in the x-axis
pos1 = 0
pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert_equal(segment[0, pos1], lineoffset + linelength / 2.)
assert_equal(segment[1, pos1], lineoffset - linelength / 2.)
assert_equal(segment[0, pos2], positions[i])
assert_equal(segment[1, pos2], positions[i])
def check_allprop_array(values, target):
'''
check to make sure all values match the given target if arrays
note: this is not a test, it is used by tests
'''
for value in values:
np.testing.assert_array_equal(value, target)
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# Github issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert_equal(ax.dataLim.bounds, bounds)
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert_equal(q.get_datalim(ax.transData).bounds, (0., 0., 7., 9.))
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert_equal(ax.dataLim.bounds, (20.0, 30.0, 15.0, 6.0))
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
extensions=['png'],
remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X/float(x[-1])
hh = Y/float(y[-1])
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
extensions=['png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = Axes3D(fig)
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=[xy], transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super(SquareCollection, self).__init__(
4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
# equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000], picker=True)
fig.savefig(io.BytesIO(), dpi=fig.dpi)
class MouseEvent(object):
pass
event = MouseEvent()
event.x = 325
event.y = 240
found, indices = col.contains(event)
assert found
assert_array_equal(indices['ind'], [0])
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
@image_comparison(baseline_images=['size_in_xy'], remove_text=True,
extensions=['png'])
def test_size_in_xy():
fig, ax = plt.subplots()
widths, heights, angles = (10, 10), 10, 0
widths = 10, 10
coords = [(10, 10), (15, 15)]
e = mcollections.EllipseCollection(
widths, heights, angles,
units='xy',
offsets=coords,
transOffset=ax.transData)
ax.add_collection(e)
ax.set_xlim(0, 30)
ax.set_ylim(0, 30)
def test_pandas_indexing():
pd = pytest.importorskip('pandas')
# Should not break when faced with a
# non-zero indexed series
index = [11, 12, 13]
ec = fc = pd.Series(['red', 'blue', 'green'], index=index)
lw = pd.Series([1, 2, 3], index=index)
ls = pd.Series(['solid', 'dashed', 'dashdot'], index=index)
aa = pd.Series([True, False, True], index=index)
Collection(edgecolors=ec)
Collection(facecolors=fc)
Collection(linewidths=lw)
Collection(linestyles=ls)
Collection(antialiaseds=aa)
@pytest.mark.style('default')
def test_lslw_bcast():
col = mcollections.PathCollection([])
col.set_linestyles(['-', '-'])
col.set_linewidths([1, 2, 3])
assert_equal(col.get_linestyles(), [(None, None)] * 6)
assert_equal(col.get_linewidths(), [1, 2, 3] * 2)
col.set_linestyles(['-', '-', '-'])
assert_equal(col.get_linestyles(), [(None, None)] * 3)
assert_equal(col.get_linewidths(), [1, 2, 3])
@image_comparison(baseline_images=['scatter_post_alpha'],
extensions=['png'], remove_text=True,
style='default')
def test_scatter_post_alpha():
fig, ax = plt.subplots()
sc = ax.scatter(range(5), range(5), c=range(5))
# this needs to be here to update internal state
fig.canvas.draw()
sc.set_alpha(.1)
| mit |
sthyme/ZFSchizophrenia | BehaviorAnalysis/Alternative_Analyses/Correlation_between_genes/correlations_DISTANCE_betweengenes.py | 1 | 5605 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import matplotlib.colors as mat_col
from matplotlib.colors import LinearSegmentedColormap
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import set_link_color_palette
import numpy as np
import pandas as pd
import glob
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.spatial import distance
#Dig=pd.read_csv("all_regions_sum_nPix_perk_red_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_nPix_perk_green_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_thres50_newnames.csv")
#Dir=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_newnames.csv")
#Db=pd.read_csv("MAYbehaviorfullset_transposed.csv")
Db=pd.read_csv("AUG16_12_dectest.csv")
#Db=pd.read_csv("AUGMAY18testingfinalfullgoodonesoct30nonoise_transposed.csv")
#Dig = Dig.applymap(np.log)
#Digl = Dig # use if skipping log10
#Digl = Dig.applymap(np.log10)
#print Dig
#Digl = Digl.replace([np.inf, -np.inf], 0)
#Digl = Digl.replace([np.inf, -np.inf], np.nan)
# use if not doing log10
#Digl = Digl.replace([0], np.nan)
#Dig = Dig.replace([0], np.nan)
#DignoNA = Dig.dropna()
#Db = Db.apply(lambda x: [y if 0 < y < 0.05 else np.nan for y in x])
#Db = Db.apply(lambda x: [y if -0.05 < y < 0 else np.nan for y in x])
#print Db["adamtsl3"]
#for binarizing
# DEC 2018, THIS BINARIZING WORKS, BUT NOT DOING IT
# only binarizing the "non-significant" data
Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 1 for y in x])
# convert all non-significant values to large number
##Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 5 for y in x])
#print Db["adamtsl3"]
# keep all positive values, everything negative (between 0 and -0.05) becomes -1
##Db = Db.apply(lambda x: [y if y > 0 else -1 for y in x])
#print Db["adamtsl3"]
##Db = Db.apply(lambda x: [y if y < 2 else 0 for y in x])
#print Db["adamtsl3"]
# everything that is negative or 0 stays the same, everything else (between 0 and 0.05) becomes 1
##Db = Db.apply(lambda x: [y if y <= 0 else 1 for y in x])
#print Db["adamtsl3"]
#Db = Db.apply(lambda x: [y if y == np.nan else 1 for y in x])
#Db = Db.apply(lambda x: [y if y != np.nan else 0 for y in x])
# TRYING LOG ON P-VALUES, NOT SURE IF GOOD IDEA
#Db = Db.applymap(np.log10)
###Db = Db.apply(lambda x: [y if -0.1 < y < 0.1 else np.nan for y in x])
#print Db
#exit()
corrlist = []
dfdict = {}
dfdictdist = {}
collist = []
for column1 in Db:
for column2 in Db:
corr = Db[column1].corr(Db[column2], min_periods=6)
# dist = np.square(Db[column1] - Db[column2])
# print dist
dist = distance.euclidean(Db[column1], Db[column2])
# print dist
#corr = Db[column1].corr(Dig[column2], method='spearman', min_periods=7)
# if corr > 0.6 or corr < -0.6:
#corrlist.append( (corr, column1, column2))
#newdf = pd.concat([Dig[column2], Digl[column2], Db[column1]], axis=1)
newdf = pd.concat([Db[column2], Db[column1]], axis=1)
# newdf = newdf.dropna()
corrlist.append( (corr, newdf, column1, column2, dist))
if column1 in dfdict.keys():
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
else:
dfdict[column1] = []
dfdictdist[column1] = []
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
if column2 not in collist:
collist.append(column2)
#corrlist.append( (corr, column1, column2, newdf))
#newdf = Dig[column2].copy()
#newdf2 = newdf.concat(Db[column1])
#newdf[column1] = Db[column1]
#print newdf.dropna()
#exit()
# break
#break
#print dfdict
#print dfdictdist
#print collist
dfcor = pd.DataFrame.from_dict(dfdict, orient='index')
dfcor.columns = collist
dfdist = pd.DataFrame.from_dict(dfdictdist, orient='index')
dfdist.columns = collist
dfcor = dfcor.sort_index()
dfdist = dfdist.sort_index()
dfcor.to_csv("dec_correlation_sort1.csv")
dfdist.to_csv("dec_distance_sort1.csv")
#print dfcor
#corrlist.sort(key=lambda tup: tup[0])
#old way of just printing before generate the DF
##for i in range(0, len(corrlist)):
## print corrlist[i][0], corrlist[i][4], corrlist[i][2], corrlist[i][3]
#print corrlist[i][1]
#print corrlist[i][2]
#Db=pd.read_csv("MAY2018fullheatmapsetfinal_0.csv")
#Db = Db.transpose()
#Dig = Dig.values
#Dir = Dir.values
#Db = Db.values
#print "test1"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Dig
#print "test2"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Db
#Digb = Dig[:,1:]
#Dirb = Dir[:,1:]
#Digb = np.delete(Dig, 0, axis=1)
#Dbb = Db[:,1:]
#Dbb = np.delete(Db, 0, axis=1)
#Digb = np.log(Digb)
#Digb = Dig.values
#Dbb = Db.values
#print "test1"
#print Dbb
#print "test2"
#print Digb
#print np.shape(Dbb)
#print np.shape(Digb)
#for row in range(Digb.shape[0]):
#print str(pearsonr(Dbb[row,:], Digb[row,:]))
#print str(pearsonr(Dbb[:,row], Digb[:,row]))
#spearlist = []
#print "green correlation"
#for column1 in Digb.T:
# for column2 in Dbb.T:
# spearlist.append(str(spearmanr(column1, column2, nan_policy='omit')))
#spearlist.sort()
#for s in spearlist:
# print s
#print "red correlation"
#for column3 in Dirb.T:
# for column4 in Dbb.T:
# print str(pearsonr(column3, column4))
#for column1 in Dig:
# for column2 in Db:
# print column1.corr
#print "green correlation"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
#print Dig.corrwith(Db.set_axis(Dig.columns, axis='columns', inplace=False))
#print Dig.corrwith(Db)
#print "red correlation"
#Dir.corrwith(Db)
| mit |
sillvan/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 1 | 3320 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The Hyperspy developers
#
# This file is part of Hyperspy.
#
# Hyperspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hyperspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hyperspy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
----------
x1: array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
have the same dimensions in the navigation axes.
x2: array or float
The position of the end of the line segment in x.
see x1 arguments
y: array or float
The position of line segment in y.
see x1 arguments
kwargs:
Keyword arguments of valid axvline properties (i.e. recognized by
mpl.plot).
Example
-------
>>> import numpy as np
>>> im = signals.Image(np.zeros((100, 100)))
>>> m = utils.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {}
lp['color'] = 'black'
lp['linewidth'] = 1
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def plot(self):
if self.ax is None:
raise AttributeError(
"To use this method the marker needs to be first add to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
| gpl-3.0 |
giacomov/lclike | lclike/duration_computation.py | 1 | 12141 | __author__ = 'giacomov'
# !/usr/bin/env python
# (to run the script directly without typing 'python', move the line above to the very top
# of the file as a proper shebang: '#!/usr/bin/env python')
# importing modules
import numpy as np
# cant use 'show' inside the farm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import argparse
import decayLikelihood
import warnings
####################################################################
mycmd = argparse.ArgumentParser() # this is a class
mycmd.add_argument('triggername', help="The name of the GRB in YYMMDDXXX format (ex. bn080916009)")
mycmd.add_argument('redshift', help="Redshift for object.")
mycmd.add_argument('function', help="Function to model. (ex. crystalball2, band)")
mycmd.add_argument('directory', help="Directory containing the file produced by gtburst")
if __name__ == "__main__":
args = mycmd.parse_args()
os.chdir(args.directory)
##############################################################################
textfile = os.path.join(args.directory, '%s_res.txt' % (args.triggername))
tbin = np.recfromtxt(textfile, names=True)
textfile = os.path.join(args.directory, '%s_MCsamples_%s.txt' % (args.triggername, args.function))
samples = np.recfromtxt(textfile, names=True)
# function for returning 1 and 2 sigma errors from sample median
def getErr(sampleArr):
# compute sample percentiles for 1 and 2 sigma
m, c, p = np.percentile(sampleArr, [16, 50, 84])
# print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
m2, c2, p2 = np.percentile(sampleArr, [3, 50, 97])
return m, c, p, m2, c2, p2
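# illustrative note (added, not in the original script): for samples drawn from a
# standard normal distribution, getErr would return roughly (-1.0, 0.0, 1.0, -1.9, 0.0, 1.9),
# i.e. ~1-sigma (16th/84th) and ~2-sigma-ish (3rd/97th) percentile bounds around the median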
# prepare for plotting and LOOP
t = np.logspace(0, 4, 100)
t = np.append(t, np.linspace(0, 1, 10))
t.sort()
t = np.unique(t)
print('NUMBER OF times to iterate: %s' % (len(t)))
x = decayLikelihood.DecayLikelihood()
if args.function == 'crystalball2':
crystal = decayLikelihood.CrystalBall2() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(crystal)
# CrystalBall DiffFlux####################################################
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0])
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
# NORMALIZATION IS THE FLUX AT THE PEAK
pB = parameters[3] # decay time is independent of scale # (y*.001) # scale =0.001, for all xml files
fBe = pB / np.e
# t = (fBe/N)**(-1/a) defined to be 1
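# (added note, assuming the decay tail behaves as flux(t) ~ N*(t - mu)**(-decayIndex):
# setting N*(t - mu)**(-decayIndex) = fBe and solving for t gives
# t = mu + (fBe/N)**(-1/decayIndex), which is the 1/e time computed below)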
mu = parameters[0]
tP = mu
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
teP = mu + (fBe / parameters[3]) ** (
-1 / parameters[2]) # sometimes 'RuntimeWarning: overflow encountered in double_scalars'
except Warning:
print('RuntimeWarning Raised! mu,sigma,decayIndex,and N:', parameters)
teP = parameters[0] + (fBe / parameters[3]) ** (-1 / parameters[2])
Peak[i] = pB
ePeak[i] = fBe
# redshift correcting t/(1+z)
tPeak[i] = tP / (1 + float(args.redshift)) ################################
tePeak[i] = teP / (1 + float(args.redshift)) ################################
elif args.function == 'band':
band = decayLikelihood.DecayBand() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(band)
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0]) # fractional brightness used in calcuating char-time, but not needed otherwise
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0]) # characteristic time
T05 = np.zeros(samples.shape[0])
T90 = np.zeros(samples.shape[0])
T95 = np.zeros(samples.shape[0])
T25 = np.zeros(samples.shape[0])
T50 = np.zeros(samples.shape[0])
T75 = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
tc = band.getCharacteristicTime() # get the characteristic time.
# T50/T90 TAKING TOO LONG (1/4)
# t90, t05, t95 = band.getTsomething( 90 ) # if the argument is 90, returns the T90 as well as the T05 and the T95. If the argument is 50, returns the T50 as well as the T25 and T75, and so on.
# t50, t25, t75 = band.getTsomething( 50 )
tp, fp = band.getPeakTimeAndFlux() # returns the time of the peak, as well as the peak flux
tePeak[i] = tc / (1 + float(args.redshift)) ################################
tPeak[i] = tp / (1 + float(args.redshift))
Peak[i] = fp
# T50/T90 TAKING TOO LONG (2/4)
# T05[i] = t05/(1+float(args.redshift))
# T90[i] = t90/(1+float(args.redshift))
# T95[i] = t95/(1+float(args.redshift))
# T50/T90 TAKING TOO LONG (3/4)
# T25[i] = t25/(1+float(args.redshift))
# T50[i] = t50/(1+float(args.redshift))
# T75[i] = t75/(1+float(args.redshift))
# Defining sigma bands
print('ENTERING Percentile LOOP')
upper = np.zeros(t.shape[0])
lower = np.zeros(t.shape[0])
upper2 = np.zeros(t.shape[0])
lower2 = np.zeros(t.shape[0])
meas = np.zeros(t.shape[0])
fluxMatrix = np.zeros([samples.shape[0], t.shape[0]])
for i, s in enumerate(samples):
x.decayFunction.setParameters(*s)
fluxes = map(x.decayFunction.getDifferentialFlux, t)
fluxMatrix[i, :] = np.array(fluxes)
for i, tt in enumerate(t):
allFluxes = fluxMatrix[:, i]
m, p = np.percentile(allFluxes, [16, 84])
lower[i] = m
upper[i] = p
m2, p2 = np.percentile(allFluxes, [2.5, 97.5])
lower2[i] = m2
upper2[i] = p2
wdir = '%s' % (args.directory)
# save TXT files instead of .npy
placeFile = os.path.join(wdir, "%s_tBrightness_%s" % (args.triggername, args.function))
with open(placeFile, 'w+') as f:
f.write("Peak tPeak ePeak tePeak\n")
for i, s in enumerate(Peak):
f.write("%s %s %s %s\n" % (Peak[i], tPeak[i], ePeak[i], tePeak[i]))
# CALCULATING T50/T90 TAKES TOO LONG
# T50/T90 TAKING TOO LONG (4/4)
# if args.function == 'band':
# #compute percentiles for 1 sigma
# m90,c90,p90 = np.percentile(T90,[16,50,84])
# m50,c50,p50 = np.percentile(T50,[16,50,84])
# #compute percentiles for 1 and 2 sigma
# #90m,90c,90p,90m2,90c2,90p2 = getErr(T90)
# #50m,50c,50p,50m2,50c2,50p2 = getErr(T50)
# #print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
#
# placeFile=os.path.join(wdir,"%s_t90_t50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 90minus 90plus t50 50minus 50plus\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s %s %s %s\n" % (m90,m90-c90,p90-c90,c50,m50-c50,p50-c50)) #c,m-c,p-c
#
# placeFile=os.path.join(wdir,"%s_samplesT90_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 t05 t95\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s\n" % (T90[i],T05[i],T95[i]))
# placeFile=os.path.join(wdir,"%s_samplesT50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t50 t25 t25\n")
# for i,s in enumerate(T50):
# f.write("%s %s %s\n" % (T50[i],T25[i],T75[i]))
# compute char-time percentiles for 1 and 2 sigma
m, c, p, m2, c2, p2 = getErr(tePeak)
# saves txt file
wkdir = '%s' % (args.directory)
fileDir = os.path.join(wkdir, '%s_timeRes_%s' % (args.triggername, args.function))
with open(fileDir, 'w+') as f:
f.write('%s %s %s\n' % ('median', 'minus', 'plus'))
f.write('%s %s %s\n' % (c, m - c, p - c))
# PLOTTING BINS AND SIGMA BAND
print("PLOTTING...")
fig = plt.figure()
# median is your "x"
# Y is your "y"
# DY is the array containing the errors
# DY==0 filters only the zero error
data = tbin
# redshift correction /(1+args.redshif)
median = (data["tstart"] + data["tstop"]) / 2 / (1 + float(args.redshift))
start = data['tstart'] / (1 + float(args.redshift)) ##
stop = data['tstop'] / (1 + float(args.redshift)) ##
y = data["photonFlux"]
Dy = data["photonFluxError"]
try:
y = np.core.defchararray.replace(y, "<", "", count=None) # runs through array and removes strings
except:
print('No Upper-Limits Found in %s.' % (args.triggername))
try:
Dy = np.core.defchararray.replace(Dy, "n.a.", "0",
count=None) ## 0 error is nonphysical, and will be checked for in plotting
except:
print('No 0-Error Found in %s.' % (args.triggername))
bar = 0.5
color = "blue"
Y = np.empty(0, dtype=float) # makes empty 1-D array for float values
for i in y:
Y = np.append(Y, float(i))
DY = np.empty(0, dtype=float)
for i in Dy:
DY = np.append(DY, float(i))
plt.clf()
if (DY > 0).sum() > 0: # if sum() gives a non-zero value then there are error values
plt.errorbar(median[DY > 0], Y[DY > 0],
xerr=[median[DY > 0] - start[DY > 0], stop[DY > 0] - median[DY > 0]],
yerr=DY[DY > 0], ls='None', marker='o', mfc=color, mec=color, ecolor=color, lw=2, label=None)
if (DY == 0).sum() > 0:
plt.errorbar(median[DY == 0], Y[DY == 0],
xerr=[median[DY == 0] - start[DY == 0], stop[DY == 0] - median[DY == 0]],
yerr=[bar * Y[DY == 0], 0.0 * Y[DY == 0]], lolims=True, ls='None', marker='', mfc=color, mec=color,
ecolor=color, lw=2, label=None)
plt.suptitle('%s photonFlux per Time' % (args.triggername))
plt.xlabel('Rest Frame Time(s)')
plt.ylabel('Photon Flux')
plt.xscale('symlog')
plt.yscale('log')
plt.grid(True)
if args.function == 'crystalball2':
SCALE = 0.001
elif args.function == 'band':
SCALE = 1.0 # 0.1 # shouldn't need a scale anymore for Band function
ylo = 1e-7 # min(lower2*SCALE)*1e-1 # CANT GET THIS TO WORK YET DYNAMICALLY
yup = max(upper2 * SCALE) * 10
plt.ylim([ylo, yup])
# correcting for redshift t/(1+args.redshift)
plt.fill_between(t / (1 + float(args.redshift)), lower * SCALE, upper * SCALE, alpha=0.5, color='blue')
plt.fill_between(t / (1 + float(args.redshift)), lower2 * SCALE, upper2 * SCALE, alpha=0.3, color='green')
# y = map(x.decayFunction.getDifferentialFlux, t) # maps infinitesimal values of flux at time t to y
# raw_input("Press ENTER")
# PowerLaw
# plt.plot(t,,'o')
# saves plots
wdir = '%s' % (args.directory)
imsave = os.path.join(wdir, '%s_objFit_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
# histograms of 1/e and save
print("Making histograms")
fig = plt.figure(figsize=(10, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
bins = np.linspace(min(tePeak), np.max(tePeak), 100)
ax0 = plt.subplot(gs[0])
ax0.hist(tePeak, bins, normed=True)
plt.title('1/e (min to medx2)')
plt.xlabel('1/e time (s)')
plt.xlim([min(tePeak), np.median(tePeak) * 2])
ax1 = plt.subplot(gs[1])
ax1.hist(tePeak, bins, normed=True)
plt.title('1/e (min to max)')
plt.xlabel('time (s)')
plt.tight_layout()
imsave = os.path.join(wdir, '%s_hist_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
print("Finished Plotting/Saving!")
| bsd-3-clause |
numenta/nupic.vision | src/nupic/vision/data/OCR/characters/parseJPG.py | 3 | 7772 | #!/usr/bin/python2
'''
This script parses JPEG images of text documents to isolate and save images
of individual characters. The size of these output images in pixels is
specified by the parameters desired_height and desired_width.
The JPEG images are converted to grey scale using a parameter called
luminance_threshold to distinguish between light and dark pixels. Lines of
text are found by searching for rows that contain dark pixels, and
characters are found by searching for columns that contain dark pixels. Once
a character is found it is padded with blank rows and columns to obtain the
desired size. The images are saved using the filenames given in the XML file.
'''
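# added illustrative note: in the scans below, a row (or column) is treated as containing
# ink when at least one of its pixels is darker than the threshold, i.e. when
# pixels[row, :].min() < luminance_threshold evaluates to True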
# Set desired output image height and width in pixels
desired_height = 32
desired_width = 32
DEBUG = False
import matplotlib.pyplot as plot
import numpy as np
import operator
import sys
import re
import os
from PIL import Image
from xml.dom import minidom
jpg_list = [ 'characters-0.jpg', 'characters-1.jpg', 'characters-2.jpg',
'characters-3.jpg', 'characters-4.jpg', 'characters-5.jpg',
'characters-6.jpg', 'characters-7.jpg', 'characters-8.jpg',
'characters-9.jpg', 'characters-10.jpg', 'characters-11.jpg',
'characters-12.jpg', 'characters-13.jpg', 'characters-14.jpg',
'characters-15.jpg', 'characters-16.jpg', 'characters-17.jpg',
'characters-18.jpg', 'characters-19.jpg' ]
#jpg_list = [ 'debug_doc.jpg' ]
# Parse XML file for filenames to use when saving each character image
xmldoc = minidom.parse('characters.xml')
#xmldoc = minidom.parse('debug_doc.xml')
filelist = xmldoc.getElementsByTagName('image')
print len(filelist)
#for i in range(145):
#print filelist[62*i].attributes['file'].value
# this counter gets used to select file names from an xml file
output_files_saved = 0
for jpg in jpg_list:
print jpg
im = Image.open(jpg)
width, length = im.size
if DEBUG:
print "image size: ", im.size
print "image mode: ", im.mode
print im.size[1],im.size[0]
# read pixel data from image into a numpy array
if im.mode == 'L':
pixels = np.array(list(im.getdata())).reshape(im.size[1],im.size[0])
elif im.mode == 'RGB':
pixels = np.array(list(im.convert('L').getdata())).reshape(im.size[1],
im.size[0])
#im.show()
##############################################################################
# Removed all logic for determining the value to use to distinguish between
# light and dark pixels because this is a non-trivial challenge of its own and
# I want to get to generating a data set for OCR which I can do much faster by
# choosing the threshold manually.
##############################################################################
luminance_threshold = 100
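# added sketch (assumption, not part of the original script): an automatic threshold could
# be computed with Otsu's method, e.g. via scikit-image if it is available:
# from skimage.filters import threshold_otsu
# luminance_threshold = threshold_otsu(pixels)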
##############################################################################
# parse document for lines of text
##############################################################################
row = 0
while row < length:
# Find the first row of pixels in next line of text by ignoring blank rows
# of pixels which will have a non-zero product since white pixels have a
# luminance value of 255
#row_data = pixels[row * width : row * width + width]
while (row < length and pixels[row,:].min() > luminance_threshold):
row += 1
first_row = row
if DEBUG:
print "the first row of pixels in the line of text is ", first_row
# Find the last row of pixels in this line of text by counting rows with
# dark pixels. These rows have a product of zero since the luminance value
# of all dark pixels was set to zero
while (row < length and pixels[row:row + 2,:].min() < luminance_threshold):
row += 1
last_row = row
#if row < length:
#last_row = row + 2 # this is a hack for Cochin font Q
#row += 5 # this is a hack for Cochin font Q
if DEBUG:
print "the last row of pixels in the line of text is ", last_row
##############################################################################
# parse line of text for characters
##############################################################################
if first_row < last_row:
col = 0
while col < width:
# find first column of pixels in the next character by ignoring blank
# cols of pixels
while col < width and pixels[first_row:last_row,col].min() > luminance_threshold:
col += 1
first_col = col
# find last column of pixels in the next character by counting columns
# with dark pixels
while col < width and \
pixels[first_row:last_row,col:col + 5].min() < luminance_threshold:
col += 1
last_col = col
##############################################################################
# remove blank rows from the top and bottom of characters
##############################################################################
if first_col < last_col:
# remove blank rows from the top of the character
r = first_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r + 1;
char_first_row = r;
# remove blank rows from the bottom of the character
r = last_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r - 1;
char_last_row = r + 1;
if DEBUG:
# isolate an image of this character
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
print "Character size after whitespace removal", character.size
print first_col, first_row, last_col, last_row
#character.show()
# pad character width out to desired_width
char_width = last_col - first_col
if char_width > desired_width:
print "Character is wider than ", desired_width
else:
# add the same number of blank columns to the left and right
first_col = first_col - (desired_width - char_width) / 2
last_col = last_col + (desired_width - char_width) / 2
# if the difference was odd we'll be short one column
char_width = last_col - first_col
if char_width < desired_width:
last_col = last_col + 1
# pad character height out to desired_height
char_height = char_last_row - char_first_row
if char_height > desired_height:
print "Character is taller than ", desired_height
else:
# add the same number of blank rows to the left and right
char_first_row = char_first_row - (desired_height - char_height) / 2
char_last_row = char_last_row + (desired_height - char_height) / 2
# if the difference was odd we'll be short one row
char_height = char_last_row - char_first_row
if char_height < desired_height:
char_last_row = char_last_row + 1
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
if DEBUG:
print "Character size after padding", character.size
print first_col, char_first_row, last_col, char_last_row
#character.show()
#garbage = raw_input()
# save image to filename specified in ground truth file
filename = filelist[output_files_saved].attributes['file'].value
directory = filename.split('/')[0]
if not os.path.exists(directory):
os.makedirs(directory)
character.save(filename, "JPEG", quality=80)
output_files_saved = output_files_saved + 1
print output_files_saved
| agpl-3.0 |
google-research/social_cascades | news/graph_processing.py | 1 | 1943 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph processing script."""
import os
from absl import app
from absl import flags
from absl import logging
import networkx as nx
import pandas as pd
from utils import graph_filter_with_degree
from utils import load_graph_from_edgelist_csv
FLAGS = flags.FLAGS
flags.DEFINE_string(
'g_file',
'../proj_Data/cat_data/test3/sr_timespan_post_graph-00000-of-00001.csv',
'raw graph edgelist csv file')
flags.DEFINE_integer('low', 40, 'low degree threshold')
flags.DEFINE_integer('high', 80, 'high degree threshold')
flags.DEFINE_string('data_file', '', 'raw data path')
flags.DEFINE_string('filename', '', 'graph filename')
flags.DEFINE_string('save_path', '', 'graph save path')
def main(_):
df = pd.read_csv(FLAGS.data_file)
author_set = set(df['author'].unique())
graph = load_graph_from_edgelist_csv(FLAGS.g_file)
logging.info('Original Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
graph = graph_filter_with_degree(graph, FLAGS.low, FLAGS.high, author_set)
logging.info('Filtered Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
nx.write_gpickle(graph, os.path.join(
FLAGS.save_path, FLAGS.filename + '%s_%s.gpickle' %
(FLAGS.low, FLAGS.high)))
logging.info('Saved graph.')
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
equialgo/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with a sampling algorithm
# that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
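# e.g. with n_population=10 and n_samples=3 this gives C(10, 3) = 120 distinct subsets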
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
gfyoung/pandas | pandas/io/formats/printing.py | 3 | 17290 | """
Printing tools.
"""
import sys
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
TypeVar,
Union,
)
from pandas._config import get_option
from pandas.core.dtypes.inference import is_sequence
EscapeChars = Union[Mapping[str, str], Iterable[str]]
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
def adjoin(space: int, *lists: List[str], **kwargs) -> str:
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop("strlen", len)
justfunc = kwargs.pop("justfunc", justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode="left")
nl.extend([" " * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append("".join(lines))
return "\n".join(out_lines)
def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> List[str]:
"""
Perform ljust, center, rjust against string or list-like
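For example (illustrative): justify(['a', 'bb'], 3) returns ['  a', ' bb']
(right-justified by default), while mode='left' returns ['a  ', 'bb '].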
"""
if mode == "left":
return [x.ljust(max_len) for x in texts]
elif mode == "center":
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
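#
# Illustrative usage (added example; outputs shown approximately):
#   pprint_thing({'a': [1, 2, 3]})    # -> "{'a': [1, 2, 3]}"
#   pprint_thing_encoded('naïve')     # -> the same text as utf-8 encoded bytes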
def _pprint_seq(
seq: Sequence, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
Bounds the length of the printed sequence, depending on options.
"""
if isinstance(seq, set):
fmt = "{{{body}}}"
else:
fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
# handle sets, no slicing
r = [
pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
for i in range(min(nitems, len(seq)))
]
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ","
return fmt.format(body=body)
def _pprint_dict(
seq: Mapping, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
"""
internal. pprinter for mappings. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = "{{{things}}}"
pairs = []
pfmt = "{key}: {val}"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
)
)
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs))
def pprint_thing(
thing: Any,
_nest_lvl: int = 0,
escape_chars: Optional[EscapeChars] = None,
default_escapes: bool = False,
quote_strings: bool = False,
max_seq_items: Optional[int] = None,
) -> str:
"""
This function is the sanctioned way of converting objects
to a string representation and properly handles nested sequences.
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
max_seq_items : int or None, default None
Pass through to other pretty printers to limit sequence printing
Returns
-------
str
"""
def as_escaped_string(
thing: Any, escape_chars: Optional[EscapeChars] = escape_chars
) -> str:
translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or ()
result = str(thing)
for c in escape_chars:
result = result.replace(c, translate[c])
return result
if hasattr(thing, "__next__"):
return str(thing)
elif isinstance(thing, dict) and _nest_lvl < get_option(
"display.pprint_nest_depth"
):
result = _pprint_dict(
thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items
)
elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):
result = _pprint_seq(
thing,
_nest_lvl,
escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items,
)
elif isinstance(thing, str) and quote_strings:
result = f"'{as_escaped_string(thing)}'"
else:
result = as_escaped_string(thing)
return result
def pprint_thing_encoded(
object, encoding: str = "utf-8", errors: str = "replace"
) -> bytes:
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors)
def enable_data_resource_formatter(enable: bool) -> None:
if "IPython" not in sys.modules:
# definitely not in IPython
return
from IPython import get_ipython
ip = get_ipython()
if ip is None:
# still not in IPython
return
formatters = ip.display_formatter.formatters
mimetype = "application/vnd.dataresource+json"
if enable:
if mimetype not in formatters:
# define tableschema formatter
from IPython.core.formatters import BaseFormatter
class TableSchemaFormatter(BaseFormatter):
print_method = "_repr_data_resource_"
_return_type = (dict,)
# register it:
formatters[mimetype] = TableSchemaFormatter()
# enable it if it's been disabled:
formatters[mimetype].enabled = True
else:
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
def default_pprint(thing: Any, max_seq_items: Optional[int] = None) -> str:
return pprint_thing(
thing,
escape_chars=("\t", "\r", "\n"),
quote_strings=True,
max_seq_items=max_seq_items,
)
def format_object_summary(
obj,
formatter: Callable,
is_justify: bool = True,
name: Optional[str] = None,
indent_for_name: bool = True,
line_break_each_value: bool = False,
) -> str:
"""
Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
name : name, optional
defaults to the class name of the obj
indent_for_name : bool, default True
Whether subsequent lines should be indented to
align with the name.
line_break_each_value : bool, default False
If True, inserts a line break for each value of ``obj``.
If False, only break lines when a line of values gets wider
than the display width.
.. versionadded:: 0.25.0
Returns
-------
summary string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option("display.width") or 80
if name is None:
name = type(obj).__name__
if indent_for_name:
name_len = len(name)
space1 = f'\n{(" " * (name_len + 1))}'
space2 = f'\n{(" " * (name_len + 2))}'
else:
space1 = "\n"
space2 = "\n " # space for the opening '['
n = len(obj)
if line_break_each_value:
# If we want to vertically align on each value of obj, we need to
# separate values by a line break and indent the values
sep = ",\n " + " " * len(name)
else:
sep = ","
max_seq_items = get_option("display.max_seq_items") or n
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
adj = get_adjustment()
def _extend_line(
s: str, line: str, value: str, display_width: int, next_line_prefix: str
) -> Tuple[str, str]:
if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values: List[str]) -> int:
if values:
return max(adj.len(x) for x in values)
else:
return 0
close = ", "
if n == 0:
summary = f"[]{close}"
elif n == 1 and not line_break_each_value:
first = formatter(obj[0])
summary = f"[{first}]{close}"
elif n == 2 and not line_break_each_value:
first = formatter(obj[0])
last = formatter(obj[-1])
summary = f"[{first}, {last}]{close}"
else:
if max_seq_items == 1:
# If max_seq_items=1 show only last element
head = []
tail = [formatter(x) for x in obj[-1:]]
elif n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in obj[:n]]
tail = [formatter(x) for x in obj[-n:]]
else:
head = []
tail = [formatter(x) for x in obj]
# adjust all values to max length if needed
if is_justify:
if line_break_each_value:
# Justify each string in the values of head and tail, so the
# strings will right align when head and tail are stacked
# vertically.
head, tail = _justify(head, tail)
elif is_truncated or not (
len(", ".join(head)) < display_width
and len(", ".join(tail)) < display_width
):
# Each string in head and tail should align with each other
max_length = max(best_len(head), best_len(tail))
head = [x.rjust(max_length) for x in head]
tail = [x.rjust(max_length) for x in tail]
# If we are not truncated and we are only a single
# line, then don't justify
if line_break_each_value:
# Now head and tail are of type List[Tuple[str]]. Below we
# convert them into List[str], so there will be one string per
# value. Also truncate items horizontally if wider than
# max_space
max_space = display_width - len(space2)
value = tail[0]
for max_items in reversed(range(1, len(value) + 1)):
pprinted_seq = _pprint_seq(value, max_seq_items=max_items)
if len(pprinted_seq) < max_space:
break
head = [_pprint_seq(x, max_seq_items=max_items) for x in head]
tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]
summary = ""
line = space2
for max_items in range(len(head)):
word = head[max_items] + sep + " "
summary, line = _extend_line(summary, line, word, display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + "..."
line = space2
for max_items in range(len(tail) - 1):
word = tail[max_items] + sep + " "
summary, line = _extend_line(summary, line, word, display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)
summary += line
# right now close is either '' or ', '
        # Now we want to include the ']', but not the optional trailing space.
close = "]" + close.rstrip(" ")
summary += close
if len(summary) > (display_width) or line_break_each_value:
summary += space1
else: # one row
summary += " "
# remove initial space
summary = "[" + summary[len(space2) :]
return summary
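# --- Editor's illustration (not part of the original pandas module) ---
# Rough sketch of what ``format_object_summary`` produces for a tiny Index;
# the exact spacing and truncation depend on the active display options.
#
#     >>> import pandas as pd
#     >>> format_object_summary(pd.Index(['a', 'b', 'c']), pprint_thing,
#     ...                       name='Index')       # -> '[a, b, c], ' (roughly)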
def _justify(
head: List[Sequence[str]], tail: List[Sequence[str]]
) -> Tuple[List[Tuple[str, ...]], List[Tuple[str, ...]]]:
"""
Justify items in head and tail, so they are right-aligned when stacked.
Parameters
----------
head : list-like of list-likes of strings
tail : list-like of list-likes of strings
Returns
-------
tuple of list of tuples of strings
Same as head and tail, but items are right aligned when stacked
vertically.
Examples
--------
>>> _justify([['a', 'b']], [['abc', 'abcd']])
([(' a', ' b')], [('abc', 'abcd')])
"""
combined = head + tail
# For each position for the sequences in ``combined``,
# find the length of the largest string.
max_length = [0] * len(combined[0])
for inner_seq in combined:
length = [len(item) for item in inner_seq]
max_length = [max(x, y) for x, y in zip(max_length, length)]
# justify each item in each list-like in head and tail using max_length
head = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head
]
tail = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
]
# https://github.com/python/mypy/issues/4975
# error: Incompatible return value type (got "Tuple[List[Sequence[str]],
# List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]],
# List[Tuple[str, ...]]]")
return head, tail # type: ignore[return-value]
def format_object_attrs(
obj: Sized, include_dtype: bool = True
) -> List[Tuple[str, Union[str, int]]]:
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
Must be sized.
include_dtype : bool
If False, dtype won't be in the returned list
Returns
-------
list of 2-tuple
"""
attrs: List[Tuple[str, Union[str, int]]] = []
if hasattr(obj, "dtype") and include_dtype:
# error: "Sized" has no attribute "dtype"
attrs.append(("dtype", f"'{obj.dtype}'")) # type: ignore[attr-defined]
if getattr(obj, "name", None) is not None:
# error: "Sized" has no attribute "name"
attrs.append(("name", default_pprint(obj.name))) # type: ignore[attr-defined]
# error: "Sized" has no attribute "names"
elif getattr(obj, "names", None) is not None and any(
obj.names # type: ignore[attr-defined]
):
# error: "Sized" has no attribute "names"
attrs.append(("names", default_pprint(obj.names))) # type: ignore[attr-defined]
max_seq_items = get_option("display.max_seq_items") or len(obj)
if len(obj) > max_seq_items:
attrs.append(("length", len(obj)))
return attrs
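# --- Editor's illustration (not part of the original pandas module) ---
# ``format_object_attrs`` only reports attributes that are present and
# informative; on a default 64-bit build the calls below give:
#
#     >>> import pandas as pd
#     >>> format_object_attrs(pd.Index([1, 2, 3]))
#     [('dtype', "'int64'")]
#     >>> format_object_attrs(pd.Index([1, 2, 3], name='x'))
#     [('dtype', "'int64'"), ('name', "'x'")]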
class PrettyDict(Dict[_KT, _VT]):
"""Dict extension to support abbreviated __repr__"""
def __repr__(self) -> str:
return pprint_thing(self)
| bsd-3-clause |
jaeilepp/eggie | mne/viz/_3d.py | 1 | 24122 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
        Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
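# --- Editor's sketch (illustrative only, not part of the original file) ---
# A typical call, assuming an MNE version that provides ``read_evokeds`` and
# ``make_field_map``; all file names below are hypothetical place-holders.
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     maps = mne.make_field_map(evoked, trans='sample-trans.fif',
#                               subject='sample',
#                               subjects_dir='/path/to/subjects')
#     plot_evoked_field(evoked, maps, time=0.1)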
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'transverse' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
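# --- Editor's sketch (illustrative only, not part of the original file) ---
# ``_plot_mri_contours`` is a private helper; a direct call would look roughly
# like this (all paths are hypothetical).
#
#     surf_fnames = ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf']
#     fig = _plot_mri_contours('T1.mgz', surf_fnames, orientation='axial',
#                              slices=[80, 100, 120], show=True)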
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
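# --- Editor's sketch (illustrative only, not part of the original file) ---
# Typical usage with the measurement info of a raw recording; the reader class
# and all paths below are hypothetical and depend on the MNE version in use.
#
#     raw = mne.io.Raw('sample_raw.fif')
#     fig = plot_trans(raw.info, trans_fname='sample-trans.fif',
#                      subject='sample', subjects_dir='/path/to/subjects',
#                      ch_type='meg', source='head')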
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
figure by it's id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
A instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
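# --- Editor's sketch (illustrative only, not part of the original file) ---
# Minimal call on a surface source estimate loaded from disk (the stc path and
# subject name are hypothetical).
#
#     stc = mne.read_source_estimate('sample_audvis-meg')
#     brain = plot_source_estimates(stc, subject='sample', hemi='both',
#                                   subjects_dir='/path/to/subjects',
#                                   fmin=3., fmid=6., fmax=9.)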
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
stcs is a list ie. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
                     c=c, linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
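# --- Editor's sketch (illustrative only, not part of the original file) ---
# Typical usage with the output of a sparse solver such as mixed-norm
# estimation; the variables below (evoked, forward, noise_cov) are assumed to
# exist already.
#
#     stc = mne.inverse_sparse.mixed_norm(evoked, forward, noise_cov, alpha=50)
#     plot_sparse_source_estimates(forward['src'], stc, opacity=0.1,
#                                  fig_name='MxNE dipoles')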
| bsd-2-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/nltk/probability.py | 12 | 81647 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
dict.__init__(self)
self._N = 0
self._reset_caches()
if samples:
self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return self._N
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type sample: any
:rtype: list(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
        if self._N == 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type title: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print "%4s" % str(samples[i]),
print
for i in range(len(samples)):
print "%4d" % freqs[i],
print
def _sort_keys_by_value(self):
if not self._item_cache:
self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0]))
def keys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(0), self._item_cache)
def values(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(1), self._item_cache)
def items(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: list(tuple)
"""
self._sort_keys_by_value()
return self._item_cache[:]
def __iter__(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def iterkeys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def itervalues(self):
"""
Return the values sorted in decreasing order.
:rtype: iter
"""
return iter(self.values())
def iteritems(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: iter of any
"""
self._sort_keys_by_value()
return iter(self._item_cache)
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def update(self, samples):
"""
Update the frequency distribution with the provided list of samples.
This is a faster way to add multiple samples to the distribution.
:param samples: The samples to add.
:type samples: list
"""
try:
sample_iter = samples.iteritems()
        except AttributeError:
sample_iter = imap(lambda x: (x,1), samples)
for sample, count in sample_iter:
self.inc(sample, count=count)
    def pop(self, other):
        # remove the full count of the popped sample from N (not just 1)
        self._N -= self.get(other, 0)
        self._reset_caches()
        return dict.pop(self, other)
    def popitem(self):
        # remove the full count of the popped sample from N (not just 1)
        sample, count = dict.popitem(self)
        self._N -= count
        self._reset_caches()
        return (sample, count)
def clear(self):
self._N = 0
self._reset_caches()
dict.clear(self)
def _reset_caches(self):
self._Nr_cache = None
self._max_cache = None
self._item_cache = None
def __add__(self, other):
clone = self.copy()
clone.update(other)
return clone
def __le__(self, other):
if not isinstance(other, FreqDist): return False
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
def __lt__(self, other):
if not isinstance(other, FreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, FreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, FreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]]
if len(self) > 10:
items.append('...')
return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
return self.get(sample, 0)
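# --- Editor's illustration (not part of the original NLTK module) ---
# A small worked example of the counting interface defined by FreqDist above.
#
#     >>> fd = FreqDist('abracadabra')        # counts: a:5, b:2, r:2, c:1, d:1
#     >>> fd['a'], fd.N(), fd.B()
#     (5, 11, 5)
#     >>> fd.max()
#     'a'
#     >>> round(fd.freq('a'), 3)              # 5 / 11
#     0.455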
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p, 2)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, 1-p))
return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
        distribution assigns zero probability to all values.
"""
if prob_dict is None:
self._prob_dict = {}
else:
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return 2**(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
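# --- Editor's illustration (not part of the original NLTK module) ---
# The MLE estimate is just the relative frequency taken from the FreqDist.
#
#     >>> fd = FreqDist('abracadabra')
#     >>> mle = MLEProbDist(fd)
#     >>> round(mle.prob('a'), 3)             # 5 / 11
#     0.455
#     >>> mle.prob('z')                       # unseen samples get zero mass
#     0.0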
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution. The
    "Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
        :param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
                             'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
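# --- Editor's illustration (not part of the original NLTK module) ---
# Numeric check of the (c + gamma) / (N + B*gamma) estimate with gamma = 0.5
# and the default bins = freqdist.B().
#
#     >>> fd = FreqDist('abracadabra')        # N = 11, B = 5
#     >>> ld = LidstoneProbDist(fd, 0.5)
#     >>> round(ld.prob('a'), 3)              # (5 + 0.5) / (11 + 5 * 0.5)
#     0.407
#     >>> round(ld.prob('z'), 3)              # unseen: (0 + 0.5) / 13.5
#     0.037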
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
    *(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
    *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
    distribution" and the "base frequency distribution." The
    "heldout estimate" uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
In the special case that *N[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
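# --- Editor's illustration (not part of the original NLTK module) ---
# Tiny worked example of the Tr[r] / (Nr[r] * N) estimate: 'a' occurs twice in
# the base distribution, 'b' once, and the heldout counts of all samples with
# those base counts supply the estimates.
#
#     >>> base = FreqDist('aab')              # a:2, b:1
#     >>> held = FreqDist('abb')              # a:1, b:2, N = 3
#     >>> hp = HeldoutProbDist(base, held)
#     >>> round(hp.prob('a'), 3)              # Tr[2] / (Nr[2] * N) = 1 / 3
#     0.333
#     >>> round(hp.prob('b'), 3)              # Tr[1] / (Nr[1] * N) = 2 / 3
#     0.667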
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distribution.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([fd.keys() for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
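# A hedged usage sketch for the class above (kept as comments so module
# behaviour is unchanged; ``fold1``/``fold2``/``fold3`` are hypothetical
# lists of observed samples, split from one corpus):
#
#     fd1, fd2, fd3 = FreqDist(fold1), FreqDist(fold2), FreqDist(fold3)
#     cv = CrossValidationProbDist([fd1, fd2, fd3], bins=1000)
#     cv.prob('the')
#
# With three folds there are 3*2 = 6 ordered (base, heldout) pairs, so the
# returned value is the mean of six HeldoutProbDist estimates for 'the'.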
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalising factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
if c == 0:
return self._P0
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
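# A small worked example of the Witten-Bell formulas above (toy numbers
# chosen for illustration, not taken from any corpus): with N = 10 observed
# events, T = 4 observed types and bins = 6, the number of unseen bins is
# Z = bins - T = 2, so
#
#     P(unseen) = T / (Z * (N + T)) = 4 / (2 * 14) ~ 0.143   (per unseen bin)
#     P(c = 3)  = c / (N + T)       = 3 / 14       ~ 0.214
#
# and the total mass reserved for unseen events is Z * P(unseen) = T / (N + T).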
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration during
# World War II. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls (finite
# but unknown in number).
#
# The situation of frequency zero is quite common in the original
# Good-Turing estimation. Bill Gale and Geoffrey Sampson present a
# simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
#     relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is greater than 1.96 times the standard deviation, and to switch to
# r* when it is less than or equal to that threshold:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
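# Illustrative numbers for the switch criterion above (an assumed toy case,
# not taken from any corpus): with N1 = 120 and N2 = 40, the unsmoothed
# Turing estimate for r = 1 is
#
#     r* = (r + 1) * N2 / N1 = 2 * 40 / 120 ~ 0.67
#
# and its standard deviation is approximately
#
#     sqrt((r + 1)^2 * (N2 / N1^2) * (1 + N2 / N1))
#         = sqrt(4 * (40 / 14400) * (1 + 1/3)) ~ 0.12
#
# so the unsmoothed r* is used while it differs from the smoothed estimate
# (taken from the fitted power curve) by more than 1.96 * 0.12 ~ 0.24, and
# the smoothed value is used from the first r where the difference is smaller.
#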
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count *c\**:
- *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
- *things with frequency zero in training* = N(1) for c == 0
where *c* is the original count, *N(i)* is the number of event types
    observed with count *i*.  We can think of the count of unseen events as the
    count of events with frequency one (see Jurafsky & Martin, 2nd Edition, p101).
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
count = self._freqdist[sample]
        # an unseen sample (count zero) gets the mass estimated from the
        # frequency-one samples, spread evenly over the remaining bins
        if count == 0 and self._freqdist.N() != 0:
            p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
            if self._bins == self._freqdist.B():
                p0 = 0.0
            else:
                p0 = p0 / (1.0 * self._bins - self._freqdist.B())
            return p0
nc = self._freqdist.Nr(count)
ncn = self._freqdist.Nr(count + 1)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0
return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
"""
:return: The probability mass transferred from the
seen samples to the unseen samples.
:rtype: float
"""
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
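# A short worked example of the smoothed count used above (assumed toy
# numbers): if 50 event types were seen once (N(1) = 50) and 20 were seen
# twice (N(2) = 20), then a type observed once gets the adjusted count
#
#     c* = (c + 1) * N(c + 1) / N(c) = 2 * 20 / 50 = 0.8
#
# and prob() returns c* / N.  The total mass moved to unseen events is
# N(1) / N, which is what discount() reports.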
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
class SimpleGoodTuringProbDist(ProbDistI):
"""
    SimpleGoodTuring ProbDist approximates the mapping from frequency to
    frequency of frequency as a straight line in log space, fitted by
    linear regression.
    Details of the Simple Good-Turing algorithm can be found in:
    - "Good-Turing smoothing without tears" (Gale & Sampson 1995),
      Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
    - "Speech and Language Processing" (Jurafsky & Martin),
      2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
    - http://www.grsampson.net/RGoodTur.html
    Given a set of pairs (xi, yi), where xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize the
    squared variation. E(x) and E(y) represent the means of xi and yi.
    - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'Bins parameter must not be less than freqdist.B() + 1'
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr(self):
"""
        Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
r, nr = [], []
b, i = 0, 0
while b != self._freqdist.B():
nr_i = self._freqdist.Nr(i)
if nr_i > 0:
b += nr_i
r.append(i)
nr.append(nr_i)
i += 1
return (r, nr)
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
        # For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
if j > 0:
i = r[j-1]
else:
i = 0
if j != len(r) - 1:
k = r[j+1]
else:
k = 2 * r[j] - i
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
if x_var != 0:
self._slope = xy_cov / x_var
else:
self._slope = 0.0
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
        Return the smoothed estimate of the number of samples with count r.
        :param r: The frequency (count) whose smoothed Nr value is wanted.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
prob_sum = 0.0
        # self._Nr is never stored on the instance; rebuild (r, Nr) instead
        r, nr = self._r_Nr()
        for r_, nr_ in zip(r, nr):
            prob_sum += nr_ * self._prob_measure(r_) / self._renormal
        print "Probability Sum:", prob_sum
        #assert prob_sum == 1.0, "probability sum should be one!"
def discount(self):
"""
        This function returns the total mass of probability transferred from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
try:
import numpy
except ImportError:
print "Error: Please install numpy; for instructions see http://www.nltk.org/"
exit()
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return 2**(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return self._data[i]
else:
return math.log(self._data[i], 2)
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
        to stop being a valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
        :param log: whether ``prob`` is already given as a (base-2) log probability
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
if log: self._data[i] = prob
else: self._data[i] = math.log(prob, 2)
else:
if log: self._data[i] = 2**(prob)
else: self._data[i] = prob
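# A hedged usage sketch of MutableProbDist (comments only; the samples and
# probabilities are made up for illustration):
#
#     uniform = UniformProbDist(['a', 'b', 'c'])
#     mpd = MutableProbDist(uniform, ['a', 'b', 'c'])
#     mpd.update('a', 0.5, log=False)
#     mpd.update('b', 0.25, log=False)
#     mpd.update('c', 0.25, log=False)
#     mpd.prob('a')            # -> 0.5
#
# The caller is responsible for keeping the updated values between 0 and 1
# and summing to one, as noted in update()'s docstring.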
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = [pdist.prob(s) for s in pdist.samples()]
return -sum([p * math.log(p,2) for p in probs])
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition].inc(word)
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 6 outcomes>
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond].inc(sample)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return sorted(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in self.itervalues())
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab).'
'See http://matplotlib.sourceforge.net/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = str(condition)
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len(str(c)) for c in conditions)
print ' ' * condition_size,
for s in samples:
print "%4s" % str(s),
print
for c in conditions:
print "%*s" % (condition_size, str(c)),
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print "%4d" % f,
print
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return self.keys()
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
# self._probdist_factory = probdist_factory
# self._cfdist = cfdist
# self._factory_args = factory_args
# self._factory_kw_args = factory_kw_args
factory = lambda: probdist_factory(FreqDist(),
*factory_args, **factory_kw_args)
defaultdict.__init__(self, factory)
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
defaultdict.__init__(self, DictionaryProbDist)
self.update(probdist_dict)
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
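# A quick sanity check of add_logs, worked by hand with base-2 logs: for
# x = y = 0.25 we have logx = logy = -2, neither early return triggers, and
# the result is
#
#     -2 + math.log(2**0 + 2**0, 2) == -2 + 1 == -1
#
# which is log2(0.25 + 0.25) = log2(0.5), as expected.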
def sum_logs(logs):
if len(logs) == 0:
        # Use some approximation to negative infinity.  What this does
# depends on your system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions, and uses them to sample a random process with
``numsamples`` samples. Each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
        frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
GoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
if len(`str(fdist1)`) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
print 'Generating:'
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55])
print
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
gt = GoodTuringProbDist(fd)
sgt = SimpleGoodTuringProbDist(fd)
katz = SimpleGoodTuringProbDist(fd, 7)
print '%18s %8s %12s %14s %12s' \
% ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" )
for key in fd:
print '%18s %8d %12e %14e %12e' \
% (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| agpl-3.0 |
phobson/pygridtools | pygridtools/tests/test_core.py | 2 | 24947 | import os
import warnings
from pkg_resources import resource_filename
import tempfile
import numpy
from numpy import nan
import pandas
from shapely.geometry import Polygon
import geopandas
import pytest
import numpy.testing as nptest
import pandas.util.testing as pdtest
from pygridtools import core
from pygridgen.tests import raises
from . import utils
BASELINE_IMAGES = 'baseline_files/test_core'
try:
import pygridgen
HASPGG = True
except ImportError:
HASPGG = False
@pytest.fixture
def A():
return numpy.arange(12).reshape(4, 3).astype(float)
@pytest.fixture
def B():
return numpy.arange(8).reshape(2, 4).astype(float)
@pytest.fixture
def C():
return numpy.arange(25).reshape(5, 5).astype(float)
@pytest.mark.parametrize('fxn', [numpy.fliplr, numpy.flipud, numpy.fliplr])
def test_transform(A, fxn):
result = core.transform(A, fxn)
expected = fxn(A)
nptest.assert_array_equal(result, expected)
@pytest.mark.parametrize(('index', 'axis', 'first', 'second'), [
(3, 0, 'top', 'bottom'),
(2, 1, 'left', 'right'),
(5, 0, None, None),
(5, 1, None, None),
])
def test_split_rows(C, index, axis, first, second):
expected = {
'top': numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
]),
'bottom': numpy.array([
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.],
]),
'left': numpy.array([
[ 0., 1.],
[ 5., 6.],
[10., 11.],
[15., 16.],
[20., 21.],
]),
'right': numpy.array([
[ 2., 3., 4.],
[ 7., 8., 9.],
[12., 13., 14.],
[17., 18., 19.],
[22., 23., 24.],
]),
}
if first and second:
a, b = core.split(C, index, axis)
nptest.assert_array_equal(a, expected[first])
nptest.assert_array_equal(b, expected[second])
else:
with raises(ValueError):
left, right = core.split(C, index, axis=axis)
@pytest.mark.parametrize('N', [1, 3, None])
def test__interp_between_vectors(N):
index = numpy.arange(0, 4)
vector1 = -1 * index**2 - 1
vector2 = 2 * index**2 + 2
expected = {
1: numpy.array([
[ -1.0, -2.0, -5.0, -10.0],
[ 0.5, 1.0, 2.5, 5.0],
[ 2.0, 4.0, 10.0, 20.0],
]),
3: numpy.array([
[ -1.00, -2.00, -5.00, -10.00],
[ -0.25, -0.50, -1.25, -2.50],
[ 0.50, 1.00, 2.50, 5.00],
[ 1.25, 2.50, 6.25, 12.50],
[ 2.00, 4.00, 10.00, 20.00],
])
}
if N:
result = core._interp_between_vectors(vector1, vector2, n_nodes=N)
nptest.assert_array_equal(result, expected[N])
else:
with raises(ValueError):
core._interp_between_vectors(vector1, vector2, n_nodes=0)
@pytest.mark.parametrize(('n', 'axis'), [
(1, 0), (4, 0), (1, 1), (3, 1)
])
def test_insert(C, n, axis):
expected = {
(1, 0): numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[ 7.5, 8.5, 9.5, 10.5, 11.5],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]),
(4, 0): numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[ 7.0, 8.0, 9.0, 10.0, 11.0],
[ 8.0, 9.0, 10.0, 11.0, 12.0],
[ 9.0, 10.0, 11.0, 12.0, 13.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]),
(1, 1): numpy.array([
[ 0.0, 1.0, 1.5, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 6.5, 7.0, 8.0, 9.0],
[10.0, 11.0, 11.5, 12.0, 13.0, 14.0],
[15.0, 16.0, 16.5, 17.0, 18.0, 19.0],
[20.0, 21.0, 21.5, 22.0, 23.0, 24.0],
]),
(3, 1): numpy.array([
[ 0.00, 1.00, 1.25, 1.50, 1.75, 2.00, 3.00, 4.00],
[ 5.00, 6.00, 6.25, 6.50, 6.75, 7.00, 8.00, 9.00],
[10.00, 11.00, 11.25, 11.50, 11.75, 12.00, 13.00, 14.00],
[15.00, 16.00, 16.25, 16.50, 16.75, 17.00, 18.00, 19.00],
[20.00, 21.00, 21.25, 21.50, 21.75, 22.00, 23.00, 24.00],
])
}
result = core.insert(C, 2, axis=axis, n_nodes=n)
nptest.assert_array_equal(result, expected[(n, axis)])
@pytest.mark.parametrize('how', ['h', 'v'])
@pytest.mark.parametrize('where', ['+', '-'])
@pytest.mark.parametrize('shift', [0, 2, -1])
def test_merge(A, B, how, where, shift):
expected = {
('v', '+', 0): numpy.array([
[0., 1., 2., nan],
[3., 4., 5., nan],
[6., 7., 8., nan],
[9., 10., 11., nan],
[0., 1., 2., 3.],
[4., 5., 6., 7.]
]),
('v', '-', 0): numpy.array([
[0., 1., 2., 3.],
[4., 5., 6., 7.],
[0., 1., 2., nan],
[3., 4., 5., nan],
[6., 7., 8., nan],
[9., 10., 11., nan]
]),
('v', '+', 2): numpy.array([
[ 0., 1., 2., nan, nan, nan],
[ 3., 4., 5., nan, nan, nan],
[ 6., 7., 8., nan, nan, nan],
[ 9., 10., 11., nan, nan, nan],
[nan, nan, 0., 1., 2., 3.],
[nan, nan, 4., 5., 6., 7.]
]),
('v', '-', 2): numpy.array([
[nan, nan, 0., 1., 2., 3.],
[nan, nan, 4., 5., 6., 7.],
[ 0., 1., 2., nan, nan, nan],
[ 3., 4., 5., nan, nan, nan],
[ 6., 7., 8., nan, nan, nan],
[ 9., 10., 11., nan, nan, nan]
]),
('v', '+', -1): numpy.array([
[nan, 0., 1., 2.],
[nan, 3., 4., 5.],
[nan, 6., 7., 8.],
[nan, 9., 10., 11.],
[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]
]),
('v', '-', -1): numpy.array([
[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[nan, 0., 1., 2.],
[nan, 3., 4., 5.],
[nan, 6., 7., 8.],
[nan, 9., 10., 11.]
]),
('h', '+', 0): numpy.array([
[0., 1., 2., 0., 1., 2., 3.],
[3., 4., 5., 4., 5., 6., 7.],
[6., 7., 8., nan, nan, nan, nan],
[9., 10., 11., nan, nan, nan, nan]
]),
('h', '-', 0): numpy.array([
[ 0., 1., 2., 3., 0., 1., 2.],
[ 4., 5., 6., 7., 3., 4., 5.],
[nan, nan, nan, nan, 6., 7., 8.],
[nan, nan, nan, nan, 9., 10., 11.]
]),
('h', '+', 2): numpy.array([
[0., 1., 2., nan, nan, nan, nan],
[3., 4., 5., nan, nan, nan, nan],
[6., 7., 8., 0., 1., 2., 3.],
[9., 10., 11., 4., 5., 6., 7.]
]),
('h', '-', 2): numpy.array([
[nan, nan, nan, nan, 0., 1., 2.],
[nan, nan, nan, nan, 3., 4., 5.],
[ 0., 1., 2., 3., 6., 7., 8.],
[ 4., 5., 6., 7., 9., 10., 11.]
]),
('h', '+', -1): numpy.array([
[nan, nan, nan, 0., 1., 2., 3.],
[ 0., 1., 2., 4., 5., 6., 7.],
[ 3., 4., 5., nan, nan, nan, nan],
[ 6., 7., 8., nan, nan, nan, nan],
[ 9., 10., 11., nan, nan, nan, nan]
]),
('h', '-', -1): numpy.array([
[ 0., 1., 2., 3., nan, nan, nan],
[ 4., 5., 6., 7., 0., 1., 2.],
[nan, nan, nan, nan, 3., 4., 5.],
[nan, nan, nan, nan, 6., 7., 8.],
[nan, nan, nan, nan, 9., 10., 11.]
]),
}
result = core.merge(A, B, how=how, where=where, shift=shift)
nptest.assert_array_equal(result, expected[(how, where, shift)])
@pytest.fixture
def g1(simple_nodes):
xn, yn = simple_nodes
g = core.ModelGrid(xn[:, :3], yn[:, :3])
mask = g.cell_mask
mask[:2, :2] = True
g.cell_mask = mask
return g
@pytest.fixture
def g2(simple_nodes):
xn, yn = simple_nodes
g = core.ModelGrid(xn[2:5, 3:], yn[2:5, 3:])
return g
@pytest.fixture
def polyverts():
return geopandas.GeoSeries(Polygon([(2.4, 0.9), (3.6, 0.9), (3.6, 2.4), (2.4, 2.4)]))
def test_ModelGrid_bad_shapes(simple_cells):
xc, yc = simple_cells
with raises(ValueError):
mg = core.ModelGrid(xc, yc[2:, 2:])
def test_ModelGrid_nodes_and_cells(g1, simple_cells):
xc, yc = simple_cells
assert (isinstance(g1.nodes_x, numpy.ndarray))
assert (isinstance(g1.nodes_y, numpy.ndarray))
assert (isinstance(g1.cells_x, numpy.ndarray))
nptest.assert_array_equal(g1.cells_x, xc[:, :2])
assert (isinstance(g1.cells_y, numpy.ndarray))
nptest.assert_array_equal(g1.cells_y, yc[:, :2])
def test_ModelGrid_counts_and_shapes(g1):
expected_rows = 9
expected_cols = 3
assert (g1.icells == expected_cols - 1)
assert (g1.jcells == expected_rows - 1)
assert (g1.inodes == expected_cols)
assert (g1.jnodes == expected_rows)
assert (g1.shape == (expected_rows, expected_cols))
assert (g1.cell_shape == (expected_rows - 1, expected_cols - 1))
def test_ModelGrid_cell_mask(g1):
expected_mask = numpy.array([
[1, 1], [1, 1], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0],
])
nptest.assert_array_equal(g1.cell_mask, expected_mask)
@pytest.mark.parametrize(('usemask', 'which', 'error'), [
(True, 'nodes', ValueError),
(False, 'nodes', None),
(True, 'cells', None),
])
def test_ModelGrid_to_dataframe(g1, usemask, which, error):
def name_cols(df):
df.columns.names = ['coord', 'ii']
df.index.names = ['jj']
return df
if error:
with raises(ValueError):
g1.to_dataframe(usemask=usemask, which=which)
else:
expected = {
(False, 'nodes'): pandas.DataFrame({
('easting', 0): {
0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0,
5: 1.0, 6: 1.0, 7: 1.0, 8: 1.0
}, ('easting', 1): {
0: 1.5, 1: 1.5, 2: 1.5, 3: 1.5, 4: 1.5,
5: 1.5, 6: 1.5, 7: 1.5, 8: 1.5
}, ('easting', 2): {
0: 2.0, 1: 2.0, 2: 2.0, 3: 2.0, 4: 2.0,
5: 2.0, 6: 2.0, 7: 2.0, 8: 2.0
}, ('northing', 0): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0
}, ('northing', 1): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0
}, ('northing', 2): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0}
}).pipe(name_cols),
(True, 'cells'): pandas.DataFrame({
('easting', 0): {
0: nan, 1: nan, 2: 1.25, 3: 1.25, 4: 1.25,
5: 1.25, 6: 1.25, 7: 1.25
}, ('easting', 1): {
0: nan, 1: nan, 2: 1.75, 3: 1.75, 4: 1.75,
5: 1.75, 6: 1.75, 7: 1.75
}, ('northing', 0): {
0: nan, 1: nan, 2: 1.25, 3: 1.75, 4: 2.25,
5: 2.75, 6: 3.25, 7: 3.75
}, ('northing', 1): {
0: nan, 1: nan, 2: 1.25, 3: 1.75, 4: 2.25,
5: 2.75, 6: 3.25, 7: 3.75
}
}).pipe(name_cols),
}
result = g1.to_dataframe(usemask=usemask, which=which)
pdtest.assert_frame_equal(result, expected[(usemask, which)], check_names=False)
pdtest.assert_index_equal(result.columns, expected[(usemask, which)].columns)
@pytest.mark.parametrize(('usemask', 'which', 'error'), [
(True, 'nodes', ValueError),
(False, 'nodes', None),
(True, 'cells', None),
(False, 'cells', None),
])
def test_ModelGrid_to_coord_pairs(g1, usemask, which, error):
if error:
with raises(error):
g1.to_coord_pairs(usemask=usemask, which=which)
else:
expected = {
('nodes', False): numpy.array([
[1.0, 0.0], [1.5, 0.0], [2.0, 0.0], [1.0, 0.5],
[1.5, 0.5], [2.0, 0.5], [1.0, 1.0], [1.5, 1.0],
[2.0, 1.0], [1.0, 1.5], [1.5, 1.5], [2.0, 1.5],
[1.0, 2.0], [1.5, 2.0], [2.0, 2.0], [1.0, 2.5],
[1.5, 2.5], [2.0, 2.5], [1.0, 3.0], [1.5, 3.0],
[2.0, 3.0], [1.0, 3.5], [1.5, 3.5], [2.0, 3.5],
[1.0, 4.0], [1.5, 4.0], [2.0, 4.0]
]),
('cells', False): numpy.array([
[1.25, 0.25], [1.75, 0.25], [1.25, 0.75], [1.75, 0.75],
[1.25, 1.25], [1.75, 1.25], [1.25, 1.75], [1.75, 1.75],
[1.25, 2.25], [1.75, 2.25], [1.25, 2.75], [1.75, 2.75],
[1.25, 3.25], [1.75, 3.25], [1.25, 3.75], [1.75, 3.75]
]),
('cells', True): numpy.array([
[nan, nan], [nan, nan], [nan, nan], [nan, nan],
[1.25, 1.25], [1.75, 1.25], [1.25, 1.75], [1.75, 1.75],
[1.25, 2.25], [1.75, 2.25], [1.25, 2.75], [1.75, 2.75],
[1.25, 3.25], [1.75, 3.25], [1.25, 3.75], [1.75, 3.75]
])
}
result = g1.to_coord_pairs(usemask=usemask, which=which)
nptest.assert_array_equal(result, expected[which, usemask])
def test_ModelGrid_transform(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform(lambda x: x * 10)
nptest.assert_array_equal(g.xn, xn * 10)
nptest.assert_array_equal(g.yn, yn * 10)
def test_ModelGrid_transform_x(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform_x(lambda x: x * 10)
nptest.assert_array_equal(g.xn, xn * 10)
nptest.assert_array_equal(g.yn, yn)
def test_ModelGrid_transform_y(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform_y(lambda y: y * 10)
nptest.assert_array_equal(g.xn, xn)
nptest.assert_array_equal(g.yn, yn * 10)
def test_ModelGrid_transpose(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transpose()
nptest.assert_array_equal(g.xn, xn.T)
nptest.assert_array_equal(g.yn, yn.T)
def test_ModelGrid_fliplr(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.fliplr()
nptest.assert_array_equal(g.xn, numpy.fliplr(xn))
nptest.assert_array_equal(g.yn, numpy.fliplr(yn))
def test_ModelGrid_flipud(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.flipud()
nptest.assert_array_equal(g.xn, numpy.flipud(xn))
nptest.assert_array_equal(g.yn, numpy.flipud(yn))
def test_ModelGrid_split_ax0(mg, simple_nodes):
xn, yn = simple_nodes
mgtop, mgbottom = mg.split(3, axis=0)
nptest.assert_array_equal(mgtop.nodes_x, xn[:3, :])
nptest.assert_array_equal(mgtop.nodes_y, yn[:3, :])
nptest.assert_array_equal(mgbottom.nodes_x, xn[3:, :])
nptest.assert_array_equal(mgbottom.nodes_y, yn[3:, :])
def test_ModelGrid_node_mask(simple_nodes):
g = core.ModelGrid(*simple_nodes).update_cell_mask()
expected = numpy.array([
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1]
]).astype(bool)
nptest.assert_array_equal(expected, g.node_mask)
def test_ModelGrid_merge(g1, g2, simple_nodes):
g3 = g1.merge(g2, how='horiz', where='+', shift=2)
g4 = core.ModelGrid(*simple_nodes).update_cell_mask()
nptest.assert_array_equal(g3.xn, g4.xn)
nptest.assert_array_equal(g3.xc, g4.xc)
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_merge_with_mask(simple_nodes):
mg1 = core.ModelGrid(*simple_nodes).update_cell_mask()
mg2 = (
mg1.transform_x(lambda x: x + 1)
.transform_y(lambda y: y + 5)
.update_cell_mask(mask=mg1.cell_mask)
)
merged = mg1.merge(mg2, where='+', shift=1, min_nodes=1)
expected = numpy.array([
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1]
]).astype(bool)
nptest.assert_array_equal(merged.cell_mask, expected)
fig, artists = merged.plot_cells()
return fig
def test_ModelGrid_insert_3_ax0(mg):
known_xnodes = numpy.ma.masked_invalid(numpy.array([
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
]))
known_ynodes = numpy.ma.masked_invalid(numpy.array([
[0.000, 0.000, 0.000, nan, nan, nan, nan],
[0.500, 0.500, 0.500, nan, nan, nan, nan],
[0.625, 0.625, 0.625, nan, nan, nan, nan],
[0.750, 0.750, 0.750, nan, nan, nan, nan],
[0.875, 0.875, 0.875, nan, nan, nan, nan],
[1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000],
[1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500],
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[2.500, 2.500, 2.500, nan, nan, nan, nan],
[3.000, 3.000, 3.000, nan, nan, nan, nan],
[3.500, 3.500, 3.500, nan, nan, nan, nan],
[4.000, 4.000, 4.000, nan, nan, nan, nan],
]))
result = mg.insert(2, axis=0, n_nodes=3)
nptest.assert_array_equal(result.nodes_x, known_xnodes)
nptest.assert_array_equal(result.nodes_y, known_ynodes)
def test_ModelGrid_insert_3_ax1(mg):
known_xnodes = numpy.ma.masked_invalid(numpy.array([
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan]
]))
known_ynodes = numpy.ma.masked_invalid(numpy.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, nan],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, nan, nan, nan, nan],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
[2.5, 2.5, 2.5, 2.5, 2.5, 2.5, nan, nan, nan, nan],
[3.0, 3.0, 3.0, 3.0, 3.0, 3.0, nan, nan, nan, nan],
[3.5, 3.5, 3.5, 3.5, 3.5, 3.5, nan, nan, nan, nan],
[4.0, 4.0, 4.0, 4.0, 4.0, 4.0, nan, nan, nan, nan],
]))
result = mg.insert(2, axis=1, n_nodes=3)
nptest.assert_array_equal(result.nodes_x, known_xnodes)
nptest.assert_array_equal(result.nodes_y, known_ynodes)
def test_extract(mg, simple_nodes):
xn, yn = simple_nodes
result = mg.extract(jstart=2, jend=5, istart=3, iend=6)
nptest.assert_array_equal(result.nodes_x, xn[2:5, 3:6])
nptest.assert_array_equal(result.nodes_y, yn[2:5, 3:6])
@pytest.mark.parametrize(('where', 'use_existing'), [
('inside', False),
('inside', True),
('outside', False)
])
def test_ModelGrid_mask_centroids(mg, polyverts, where, use_existing):
expected = {
('inside', False): numpy.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]),
('inside', True): numpy.array([
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]
]),
('outside', False): numpy.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]
])
}
result = mg.mask_centroids(**{where: polyverts}, use_existing=use_existing)
nptest.assert_array_equal(
result.cell_mask.astype(int),
expected[(where, use_existing)].astype(int)
)
@pytest.mark.parametrize(('kwargs', 'error'), [
[dict(min_nodes=0), ValueError],
[dict(min_nodes=5), ValueError],
[dict(triangles=True), NotImplementedError],
])
def test_ModelGrid_mask_nodes_errors(mg, polyverts, kwargs, error):
with raises(error):
mg.mask_nodes(inside=polyverts, **kwargs)
def test_masks_no_polys(mg):
with raises(ValueError):
mg.mask_nodes()
with raises(ValueError):
mg.mask_centroids()
def test_ModelGrid_to_point_geodataframe(g1):
expectedfile = resource_filename('pygridtools.tests.baseline_files', 'mgshp_nomask_nodes_points.shp')
expected = geopandas.read_file(expectedfile)
result = g1.to_point_geodataframe(which='nodes', usemask=False)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
@pytest.mark.xfail
@pytest.mark.parametrize('usemask', [True, False])
def test_ModelGrid_to_gis_cells(g1, usemask):
expectedfile = {
True: 'mgshp_mask_cells_polys.shp',
False: 'mgshp_nomask_cells_polys.shp',
}
expectedfile = resource_filename('pygridtools.tests.baseline_files',
expectedfile[usemask])
expected = geopandas.read_file(expectedfile)
result = g1.to_polygon_geodataframe(usemask=usemask)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
@pytest.mark.parametrize(('which', 'usemask', 'error'), [
('nodes', True, ValueError),
('junk', False, ValueError),
('nodes', False, None),
('cells', False, None),
])
def test_ModelGrid__get_x_y_nodes_and_mask(g1, which, usemask, error):
if error:
with raises(error):
g1._get_x_y(which, usemask=usemask)
else:
x, y = g1._get_x_y(which, usemask=usemask)
nptest.assert_array_equal(x, getattr(g1, 'x' + which[0]))
nptest.assert_array_equal(y, getattr(g1, 'y' + which[0]))
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_plots_basic(simple_nodes):
mg = core.ModelGrid(*simple_nodes)
mg.cell_mask = numpy.ma.masked_invalid(mg.xc).mask
fig, artists = mg.plot_cells()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_plots_masked(river_grid, river_bathy):
fig, artists = river_grid.plot_cells(cell_kws=dict(colors=river_bathy, cmap='Reds_r'))
return fig
@pytest.mark.parametrize(('otherargs', 'gridtype'), [
(dict(), None),
(dict(verbose=True), None),
(dict(rawgrid=False), core.ModelGrid)
])
@pytest.mark.skipif(not HASPGG, reason='pygridgen unavailabile')
def test_make_grid(simple_boundary_gdf, otherargs, gridtype):
if not gridtype:
gridtype = pygridgen.Gridgen
gridparams = {'nnodes': 12, 'verbose': False, 'ul_idx': 0}
gridparams.update(otherargs)
grid = core.make_grid(9, 7, domain=simple_boundary_gdf, **gridparams)
assert (isinstance(grid, gridtype))
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/graphics/mosaicplot.py | 6 | 26886 | """Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
See the docstring of the ``mosaic`` function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
    Return a list of proportions of the available space given the division.
    If only a number is given, it will assume a split into two pieces.
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
raise ValueError("at least one proportion should be"
"greater than zero".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
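# A small sketch of the expected behaviour (illustrative values, not an
# exhaustive test): the result is the cumulative left edge of each piece on
# the unit interval, always starting at 0 and ending at 1, e.g.
#
#     _normalize_split(0.5)        # -> array([0.0, 0.5, 1.0])
#     _normalize_split([1, 1, 2])  # -> array([0.0, 0.25, 0.5, 1.0])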
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
    Split the given rectangle into n segments whose proportions are specified
    along the given axis.  If a gap is inserted, they will be separated by a
    certain amount of space, retaining the relative proportion between them.
    A gap of 1 corresponds to a plot that is half void, and the remaining half
    space is proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
raise ValueError("dimension of the square less than"
"zero w={} h=()".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
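# A small illustrative sketch of _split_rect (the _split_rect_demo name is
# hypothetical, added only as an example): with no gap, splitting the unit
# square in half along the horizontal axis yields two side-by-side rectangles
# of width 0.5 and full height.
def _split_rect_demo():
    rects = _split_rect(0, 0, 1, 1, 0.5, horizontal=True, gap=0)
    assert np.allclose(rects, [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)])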
def _reduce_dict(count_dict, partial_key):
"""
Make partial sum on a counter dict.
Given a match for the beginning of the category, it will sum each value.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
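# An illustrative example of the partial sum performed by _reduce_dict (the
# _reduce_dict_demo name is hypothetical, added only as an example): the
# partial key ('a',) matches every full key starting with 'a', and the empty
# key matches everything.
def _reduce_dict_demo():
    counts = {('a', 'x'): 1, ('a', 'y'): 2, ('b', 'x'): 3}
    assert _reduce_dict(counts, ('a',)) == 3
    assert _reduce_dict(counts, ('b',)) == 3
    assert _reduce_dict(counts, tuple()) == 6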
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
    Given a dictionary where each entry is a rectangle, a list of keys and
    values (count of elements in each category), it splits each rect accordingly,
    as long as the key starts with the tuple key_subset. The other keys are
returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
# split base on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
    convert an object into a tuple of strings (even if it is not iterable,
    like a single integer number), while keeping strings intact
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
def _categories_level(keys):
    use the OrderedDict to implement a simple ordered set
return each level of each category
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
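# An illustrative sketch of the two helpers above (the _categories_level_demo
# name is hypothetical, added only as an example): _tuplify wraps scalars into
# 1-tuples of strings, and _categories_level extracts the ordered set of
# values seen at each position of the keys.
def _categories_level_demo():
    assert _tuplify(1) == ('1',)
    assert _tuplify(('a', 'b')) == ('a', 'b')
    keys = [('a', 'x'), ('b', 'x'), ('a', 'y')]
    assert _categories_level(keys) == [['a', 'b'], ['x', 'y']]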
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually performs the tiling
    for the creation of the mosaic plot. If the gap array has been specified
    it will insert a corresponding amount of space (proportional to the
    unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, the missing
        values will automatically be considered as 0
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it is a single number) it will be extended
        with exponentially decreasing gaps
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
        gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
    # put the count dictionary in order for the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial and each value calculate how many
# observation we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
# reduce the gap for subsequents levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
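# An illustrative sketch of the tiling produced by _hierarchical_split (the
# _hierarchical_split_demo name is hypothetical, added only as an example): a
# uniform 2x2 contingency table with no gap splits the unit square into four
# equal quarter tiles, one per key of the table.
def _hierarchical_split_demo():
    data = {('a', 'x'): 1, ('a', 'y'): 1, ('b', 'x'): 1, ('b', 'y'): 1}
    rects = _hierarchical_split(data, gap=0)
    assert set(rects) == set(data)
    for (x, y, w, h) in rects.values():
        assert np.allclose((w, h), (0.5, 0.5))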
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
def _create_default_properties(data):
""""Create the default properties of the mosaic given the data
first it will varies the color hue (first category) then the color
saturation (second category) and then the color value
(third category). If a fourth category is found, it will put
decoration on the rectangle. Doesn't manage more than four
level of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
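# An illustrative sketch of _normalize_data on a plain dictionary (the
# _normalize_data_demo name is hypothetical, added only as an example): simple
# keys are promoted to 1-tuples of strings, so every input ends up as a
# tuple-keyed counting dictionary.
def _normalize_data_demo():
    res = _normalize_data({'a': 10, 'b': 15}, None)
    assert dict(res) == {('a',): 10, ('b',): 15}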
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
"""evaluate colors from the indipendence properties of the matrix
It will encounter problem if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
    # and its standard deviation from a binomial distribution
    # under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
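# A worked illustrative example of the statistical coloring (the
# _statistical_coloring_demo name is hypothetical, added only as an example):
# for a perfectly independent 2x2 table every cell equals its expected value
# (0.5 * 0.5 * 40 = 10), so every deviation is 0 and every tile gets the
# neutral color [0, 0.5, 0] with no hatching.
def _statistical_coloring_demo():
    data = {('a', 'x'): 10, ('a', 'y'): 10, ('b', 'x'): 10, ('b', 'y'): 10}
    props = _statistical_coloring(data)
    assert all(p['color'] == [0.0, 0.5, 0.0] and p['hatch'] == ''
               for p in props.values())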
def _create_labels(rects, horizontal, ax, rotation):
"""find the position of the label for each value of each category
right now it supports only up to the four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
msg = ("maximum of 4 level supported for axes labeling..and 4"
"is alreay a lot of level, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
    #keep it fixed as it will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
            #this is dependent on the side. It's a very crude management
            #but I couldn't think of a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum((x + w / 2.0) * w * h / W for (x, y, w, h) in vals)
y_lab = sum((y + h / 2.0) * w * h / W for (x, y, w, h) in vals)
            #now based on the ordering, select which position to keep
            #needs to be written in a more general form if 4 levels are not enough
            #should also give the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
    It allows visualization of multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, the missing
        values will automatically be considered as 0. The order
        of the keys will be the same as the one of insertion.
        If a dict or a Series (or any other dict-like object)
        is used, it will take the keys as labels. If a
        np.ndarray is provided, it will generate simple
numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
        The axes where the mosaic is displayed. If not given, a new
        figure will be created
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it is a single number) it will be extended
        with exponentially decreasing gaps
labelizer : function (key) -> string, optional
        A function that generates the text to display at the center of
        each tile based on the key of that tile
properties : function (key) -> dict, optional
        A function that, for each tile in the mosaic, takes the key
        of the tile and returns the dictionary of properties
        of the generated Rectangle, like color, hatch or similar.
        A default properties set will be provided for the keys whose
        color has not been defined, and will use color variation to help
        visually separate the various categories. It should return None
        to indicate that the default property should be used for the tile.
A dictionary of the properties for each key can be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
        if true will use a crude statistical model to give colors to the plot.
        If the tile has a count that is more than 2 standard deviations
        from the expected value under the independence hypothesis, it will
        go from green to red (for positive deviations, blue otherwise) and
        will acquire a hatching when it crosses 3 sigma.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
        The generated figure
rects : dict
A dictionary that has the same keys of the original
dataset, that holds a reference to the coordinates of the
tile and the Rectangle that represent it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
    Mosaic displays for multi-way contingency tables.
Michael Friendly, York University, Psychology Department
Journal of the american statistical association
March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
    In this case we use a wider gap to obtain a better visual separation of the
resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
    The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
    a function to create the labels and one with the graphical properties
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
if isinstance(data, DataFrame) and index is None:
raise ValueError("You must pass an index if data is a DataFrame."
" See examples.")
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
#creating the labels on the axis
    #or clearing them
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects
| bsd-3-clause |
thomas-bottesch/fcl | python/utils/create_pca_vectors_from_dataset.py | 1 | 2284 | from __future__ import print_function
import fcl
import os
import time
from os.path import abspath, join, dirname, isfile
from fcl import kmeans
from fcl.datasets import load_sector_dataset, load_usps_dataset
from fcl.matrix.csr_matrix import get_csr_matrix_from_object, csr_matrix_to_libsvm_string
from sklearn.decomposition import TruncatedSVD, PCA
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file
import numpy as np
import argparse
def get_pca_projection_csrmatrix(fcl_csr_input_matrix, component_ratio):
n_components = int(fcl_csr_input_matrix.annz * component_ratio)
p = TruncatedSVD(n_components = n_components)
start = time.time()
p.fit(fcl_csr_input_matrix.to_numpy())
# convert to millis
fin = (time.time() - start) * 1000
(n_samples, n_dim) = fcl_csr_input_matrix.shape
print("Truncated SVD took %.3fs to retrieve %s components for input_matrix with n_samples %d, n_dim %d" % (fin/1000.0, str(n_components), n_samples, n_dim))
return get_csr_matrix_from_object(p.components_)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a pca matrix from an input matrix with given component ratio.')
parser.add_argument('path_input_dataset', type=str, help='Path to the input libsvm dataset')
    parser.add_argument('path_output_dataset', type=str, help='Path to the output libsvm dataset')
parser.add_argument('--component_ratio', default=0.1, type=float, help='Percentage of the average non zero values of the input dataset to use as components.')
args = parser.parse_args()
if not isfile(args.path_input_dataset):
raise Exception("Unable to find path_input_dataset: %s" % args.path_input_dataset)
print("Loading data from %s" % args.path_input_dataset)
fcl_mtrx_input_dataset = get_csr_matrix_from_object(args.path_input_dataset)
print("Retrieving the pca projection matrix")
pca_mtrx = get_pca_projection_csrmatrix(fcl_mtrx_input_dataset, args.component_ratio)
print("Convert pca projection matrix to libsvm string")
pca_mtrx_lsvm_str = csr_matrix_to_libsvm_string(pca_mtrx)
print("Writing pca projection matrix libsvm string to file %s" % args.path_output_dataset)
with open(args.path_output_dataset, 'w') as f:
f.write(pca_mtrx_lsvm_str)
| mit |
kjung/scikit-learn | sklearn/pipeline.py | 12 | 21283 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
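# A small illustrative check of _name_estimators (the _name_estimators_demo
# name is hypothetical, added only as an example): duplicate estimator types
# receive a numeric suffix while unique types keep their plain lowercased
# class name.
def _name_estimators_demo():
    from sklearn.preprocessing import StandardScaler
    from sklearn.naive_bayes import GaussianNB
    named = _name_estimators([StandardScaler(), StandardScaler(), GaussianNB()])
    assert [name for name, _ in named] == ['standardscaler-1',
                                           'standardscaler-2', 'gaussiannb']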
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
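# A minimal illustrative sketch of FeatureUnion usage (the _feature_union_demo
# name is hypothetical, added only as an example): the transformer outputs are
# concatenated column-wise, so a 2-component PCA combined with a 1-component
# TruncatedSVD yields 3 output columns.
def _feature_union_demo():
    from sklearn.decomposition import PCA, TruncatedSVD
    X = np.random.RandomState(0).rand(20, 5)
    union = FeatureUnion([("pca", PCA(n_components=2)),
                          ("svd", TruncatedSVD(n_components=1))])
    assert union.fit_transform(X).shape == (20, 3)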
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
LAIRLAB/qr_trees | src/python/run_ilqr_diffdrive.py | 1 | 2328 | #!/usr/bin/env python
#
# Arun Venkatraman (arunvenk@cs.cmu.edu)
# December 2016
#
# If we are not running from the build directory, then add lib to path from
# build assuming we are running from the python folder
import os
full_path = os.path.realpath(__file__)
if full_path.count("src/python") > 0:
import sys
to_add = os.path.abspath(os.path.join(os.path.split(full_path)[0], "../../build/"))
sys.path.append(to_add)
from IPython import embed
import lib.ilqr_diffdrive as ilqr
import visualize_circle_world as vis
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
obs_prior = [0.5, 0.5]
world_dims = [-30, 30, -30, 30]
w1 = ilqr.CircleWorld(world_dims)
w2 = ilqr.CircleWorld(world_dims)
obs_pos_1 = [-2, 0.0]
obs_pos_2 = [2, 0.0]
obs_radius = 10.0
    obstacle_1 = ilqr.Circle(obs_radius, obs_pos_1)
    obstacle_2 = ilqr.Circle(obs_radius, obs_pos_2)
    # add obstacle to world 1
    w1.add_obstacle(obstacle_1)
    # add obstacle to world 2
    w2.add_obstacle(obstacle_2)
cost, states_true_1, obs_fname_1 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w1, w2, obs_prior, "true1", "true1")
cost, states_true_2, obs_fname_2 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w2, w1, obs_prior, "true2", "true2")
cost, states_weighted_1, obs_fname_3 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w1, w2, obs_prior, "weight3", "weight3")
cost, states_weighted_2, obs_fname_4 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w2, w1, obs_prior, "weight4", "weight4")
cost, states_hind_1, obs_fname_5 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w1, w2, obs_prior, "hind3", "hind3")
cost, states_hind_2, obs_fname_6 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w2, w1, obs_prior, "hind4", "hind4")
print("Drawing world 1")
ax1 = vis.parse_draw_files([states_true_1, states_weighted_1, states_hind_1], obs_fname_1,
show=False)
plt.title('World 1')
print("Drawing world 2")
ax2 = vis.parse_draw_files([states_true_2, states_weighted_2, states_hind_2],
obs_fname_2, show=False)
plt.title('World 2')
plt.show()
embed()
| bsd-3-clause |
mcvidomi/poim2motif | run_svm_real.py | 1 | 1483 | '''
Created on 08.06.2015
@author: marinavidovic
'''
import os
import pdb
import utils_svm
import pickle
import numpy as np
import copy
import genQ
import makePOIM
import view
import matplotlib
matplotlib.use('Agg')
if __name__ == '__main__':
read_data = 1
datapath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data.txt"
savepath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data0.pkl"
lines=1000
if read_data:
x,y=utils_svm.extractRealData(datapath,savepath,lines)
else:
fobj=open(savepath,'rb')
x,y=pickle.load(fobj)
fobj.close()
num_pos = 100
num_neg = 4*num_pos
print "reduce samples"
x_red,y_red = utils_svm.reduce_samples(copy.deepcopy(x),copy.deepcopy(y),num_pos,num_neg)
nploci_letters,nploci_positions = utils_svm.non_polymorphic_loci(x_red)
#read data
experiment_name = "real1"
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
poimpath=experiment_name+"/poim.pkl"
tally=30
positives=25
sequenceno=100
mutation_prob=0.0
motif="ATTTT"
mu=13
x,y = makePOIM.gensequences(tally,positives,sequenceno,mutation_prob,motif,mu)
#compute POIM
poim_degree = 6
kernel_degree = 8
print "start poim computation"
poims = makePOIM.computePOIM(x,y,poim_degree,kernel_degree,poimpath)
Q2 = poims[0][1]
#view.test()
view.figurepoimsimple(Q2, "poim_pic", 0)
| mit |
phev8/dataset_tools | experiment_handler/time_synchronisation.py | 1 | 1444 | import os
import pandas as pd
def read_synchronisation_file(experiment_root):
filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
return pd.read_csv(filepath)
def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
"""
    Convert numeric timestamps (seconds from the start of the video or posix timestamps) of a reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time)
Parameters
----------
experiment_root: str
Root of the current experiment (to find the right synchronisation matrix)
timestamps: float or array like
timestamps to be converted
from_reference: str
name of the reference of the original timestamps
to_reference: str
name of the reference time the timestamp has to be converted to
Returns
-------
converted_timestamps: float or array like
Timestamps given in to_reference time values
"""
synchronisation_file = read_synchronisation_file(experiment_root)
offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
converted_timestamps = timestamps + offset
return converted_timestamps
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video")) | mit |
xfaxca/pygaero | example/tmax_peakfind_example.py | 1 | 4986 | # tmax_peakfind_example.py
"""
Demonstration of some of the primary functions in pygaero, including Tmax finding and elemental analysis.
"""
# Module import
from pygaero import pio
from pygaero import therm
from pygaero import gen_chem
import os
import matplotlib.pyplot as plt
def example1():
# ------------------------------- File I/O and Data Cleaning Example -------------------------------- #
indir = "" # input directory (same folder as script by default)
infiles = ['desorb1.csv', 'desorb2.csv'] # input files as a list of strings
# Read in list of csvs with figaero desorptions
df_desorbs_ls = pio.read_files(fdir=indir, flist=infiles)
print('# of files imported: ', len(df_desorbs_ls))
# Clean ion names from default A_CxHyOzI_Avg format (strip underscores '_' and remove iodide
for df in df_desorbs_ls:
print("Example of ion names before clean: ", df.columns.values[0:3])
df.columns = gen_chem.cln_molec_names(idx_names=df.columns.values, delim="_") # remove underscores
df.columns = gen_chem.replace_group(molec_ls=df.columns.values, old_groups=["I"], new_group="") # remove I
print('Example of ion names after clean: ', df.columns.values[0:3])
# Alternatively, one can just assign a single thermogram by df_example = pd.DataFrame.from_csv(indir+infile)
# Adjust thermogram signals for 4.0 LPM figaero flow rate relative to nominal 2.0 LPM sample rate
# print('Before flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
therm.flow_correction(thermograms=df_desorbs_ls, aero_samp_rates=[4.0, 4.0])
# print('After flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
# ---------------------------------- Elemental Stats Example --------------------------------------- #
# A. Calculate elemental statistics for species in each desorb CSV that was read in. Then append the DataFrames
# containing these statistics into a list. Note, Iodide has been stripped from the names at this point, so
# the parameter cluster_group=None
ele_stats_ls = []
for df in df_desorbs_ls:
df_ele_temp = gen_chem.ele_stats(molec_ls=df.columns.values, ion_state=-1, cluster_group=None,
clst_group_mw=0.0, xtra_elements=["Cl", "F"])
ele_stats_ls.append(df_ele_temp)
# -------------------------------- Peak Finding (TMax) Example --------------------------------------#
# A. Smooth time series as step prior to Tmax (helps prevent mis-identification of TMax in noisy signals)
for df in df_desorbs_ls:
for series in df.columns.values:
# print('series: ', series)
df.ix[:, series] = therm.smooth(x=df.ix[:, series].values, window='hamming', window_len=15)
plt.show()
# B. Find TMax for all loaded thermograms. Returns a pandas DataFrame with ion names as index values and columns:
# TMax1, MaxSig1, TMax2, MaxSig2, DubFlag (double peak flag - binary; -1 for no peaks found). Depending on the
# specific data set, the [pk_thresh] and [pk_win] parameters may need to be optimized. See documentation for
# function peakfind_df_ls in module therm.py for more details. Results are drastically improved by first
# smoothing the time series, so that small fluctuations in signal are not mistaken for a peak.
df_tmax_ls = therm.peakfind_df_ls(df_ls=df_desorbs_ls, pk_thresh=0.05, pk_win=20,
min_temp=40.0, max_temp=190.0)
# C. Quick plot to visualize Tmax values for 15 example ions
# therm.plot_tmax(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
# tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'], tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'])
therm.plot_tmax_double(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'],
tmax_temps2=df_tmax_ls[0].ix[15:29, 'TMax2'],
tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'],
tmax_vals2=df_tmax_ls[0].ix[15:29, 'MaxSig2'])
# ----------------------------------- Saving Results Example -------------------------------------- #
# Uncomment the following lines to save the example output
# outdir = 'testout'
# if outdir[-1] != '/':
# outdir += '/'
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# # A. Save TMax data
# for df, fname in zip(df_tmax_ls, ["desorb1_tmax", "desorb2_tmax"]):
# df.to_csv(outdir+fname+".csv")
# # B. Save smoothed desorption thermogram time series
# for df, fname in zip(df_desorbs_ls, ["desorb1_smth", "desorb2_smth"]):
# df.to_csv(outdir+fname+".csv")
# # C. Save elemental stats for each desorption
# for df, fname in zip(ele_stats_ls, ["desorb1_ele", "desorb2_ele"]):
# df.to_csv(outdir+fname+".csv")
return 0
if __name__ == '__main__':
example1()
| gpl-3.0 |
FrankWang33/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
aabadie/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
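# Hedged sketch (an addition, not part of the original example): the docstring
# notes that AIC would select the same model here. GaussianMixture exposes
# .aic() alongside .bic(), so the selection loop above could track both
# criteria, e.g.:
#
#     aic = []
#     for cv_type in cv_types:
#         for n_components in n_components_range:
#             gmm = mixture.GaussianMixture(n_components=n_components,
#                                           covariance_type=cv_type).fit(X)
#             aic.append(gmm.aic(X))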
| bsd-3-clause |
equialgo/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
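# Hedged alternative (an addition, not part of the original example): the same
# reconstruction can be written with NumPy fancy indexing, which avoids the
# explicit Python double loop; codebook[labels] gathers one palette row per
# pixel in a single operation.
def recreate_image_fast(codebook, labels, w, h):
    """Vectorized variant of recreate_image (illustrative addition)."""
    return codebook[labels].reshape(w, h, -1)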
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
gsmaxwell/phase_offset_rx | gnuradio-core/src/examples/pfb/fmtest.py | 17 | 7785 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = gr.sig_source_c (if_rate, # sample rate
gr.GR_SIN_WAVE, # waveform type
lo_freq, #frequency
1.0, # amplitude
0) # DC Offset
mixer = gr.multiply_cc ()
self.connect (self, fmtx, (mixer, 0))
self.connect (lo, (mixer, 1))
self.connect (mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = gr.add_cc ()
for n in xrange(self._N):
sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = gr.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = gr.vector_sink_c()
self.channel = blks2.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=gr.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an NBFM receiver, squelch, and vector sink for each of the M output channels of the filter and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(blks2.standard_squelch(self._audio_rate*10))
self.snks.append(gr.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
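# Hedged helper (an addition, not part of the original script): the sample-rate
# arithmetic used in fmtest.__init__ written out explicitly, so the relation
# between the composite IF rate and the per-channel rate is easy to check.
def expected_channel_rate(audio_rate=8000, n_tx=5, n_chan=10):
    if_rate = 4 * n_tx * audio_rate   # 160 kHz composite IF rate for defaults
    return if_rate / n_chan           # 16 kHz per channelizer output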
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
    # Plot each of the channel outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
smblance/ggplot | ggplot/tests/__init__.py | 8 | 10135 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from nose.tools import with_setup, make_decorator, assert_true
import warnings
figsize_orig = mpl.rcParams["figure.figsize"]
def setup_package():
mpl.rcParams["figure.figsize"] = (11.0, 8.0)
def teardown_package():
mpl.rcParams["figure.figsize"] = figsize_orig
import os
# Testing framework shamelessly stolen from matplotlib...
# Tests which should be run with 'python tests.py' or via 'nosetests' must be
# included here.
default_test_modules = [
'ggplot.tests.test_basic',
'ggplot.tests.test_readme_examples',
'ggplot.tests.test_ggplot_internals',
'ggplot.tests.test_geom',
'ggplot.tests.test_stat',
'ggplot.tests.test_stat_calculate_methods',
'ggplot.tests.test_stat_summary',
'ggplot.tests.test_geom_rect',
'ggplot.tests.test_geom_dotplot',
'ggplot.tests.test_geom_bar',
'ggplot.tests.test_qplot',
'ggplot.tests.test_geom_lines',
'ggplot.tests.test_geom_linerange',
'ggplot.tests.test_geom_pointrange',
'ggplot.tests.test_faceting',
'ggplot.tests.test_stat_function',
'ggplot.tests.test_scale_facet_wrap',
'ggplot.tests.test_scale_log',
'ggplot.tests.test_reverse',
'ggplot.tests.test_ggsave',
'ggplot.tests.test_theme_mpl',
'ggplot.tests.test_colors',
'ggplot.tests.test_chart_components',
'ggplot.tests.test_legend',
'ggplot.tests.test_element_target',
'ggplot.tests.test_element_text',
'ggplot.tests.test_theme',
'ggplot.tests.test_theme_bw',
'ggplot.tests.test_theme_gray',
'ggplot.tests.test_theme_mpl',
'ggplot.tests.test_theme_seaborn'
]
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install ggplot from source to get the '
'test data.')
def _assert_same_ggplot_image(gg, name, test_file, tol=17):
"""Asserts that the ggplot object produces the right image"""
fig = gg.draw()
return _assert_same_figure_images(fig, name, test_file, tol=tol)
class ImagesComparisonFailure(Exception):
pass
def _assert_same_figure_images(fig, name, test_file, tol=17):
"""Asserts that the figure object produces the right image"""
import os
import shutil
from matplotlib import cbook
from matplotlib.testing.compare import compare_images
from nose.tools import assert_is_not_none
if not ".png" in name:
name = name+".png"
basedir = os.path.abspath(os.path.dirname(test_file))
basename = os.path.basename(test_file)
subdir = os.path.splitext(basename)[0]
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
orig_expected_fname = os.path.join(baseline_dir, name)
actual_fname = os.path.join(result_dir, name)
def make_test_fn(fname, purpose):
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
expected_fname = make_test_fn(actual_fname, 'expected')
# Save the figure before testing whether the original image
# actually exists. This make creating new tests much easier,
# as the result image can afterwards just be copied.
fig.savefig(actual_fname)
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
raise Exception("Baseline image %s is missing" % orig_expected_fname)
err = compare_images(expected_fname, actual_fname,
tol, in_decorator=True)
if err:
msg = 'images not close: {actual:s} vs. {expected:s} (RMS {rms:.2f})'.format(**err)
raise ImagesComparisonFailure(msg)
return err
def get_assert_same_ggplot(test_file):
"""Returns a "assert_same_ggplot" function for these test file
call it like `assert_same_ggplot = get_assert_same_ggplot(__file__)`
"""
def curried(*args, **kwargs):
kwargs["test_file"] = test_file
return _assert_same_ggplot_image(*args, **kwargs)
curried.__doc__ = _assert_same_ggplot_image.__doc__
return curried
def assert_same_elements(first,second, msg=None):
assert_true(len(first) == len(second), "different length")
assert_true(all([a==b for a,b in zip(first,second)]), "Unequal: %s vs %s" % (first, second))
def image_comparison(baseline_images=None, tol=17, extensions=None):
"""
call signature::
image_comparison(baseline_images=['my_figure'], tol=17)
Compare images generated by the test with those specified in
*baseline_images*, which must correspond else an
ImagesComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
      *tol*: (default 17)
The RMS threshold above which the test is considered failed.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions:
# ignored, only for compatibility with matplotlibs decorator!
pass
def compare_images_decorator(func):
import inspect
_file = inspect.getfile(func)
def decorated():
# make sure we don't carry over bad images from former tests.
assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
"python tests.py -v' and add a '@cleanup' decorator." % \
str(plt.get_fignums())
func()
assert len(plt.get_fignums()) == len(baseline_images), "different number of " \
"baseline_images and actuall " \
"plots."
for fignum, baseline in zip(plt.get_fignums(), baseline_images):
figure = plt.figure(fignum)
_assert_same_figure_images(figure, baseline, _file, tol=tol)
        # also use the cleanup decorator to close any open figures!
        return make_decorator(cleanup(func))(decorated)
    return compare_images_decorator
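# Hedged usage sketch (an addition, not part of the suite): a test module would
# typically pair the decorator with a plotting call whose open figures are then
# compared against the stored baselines, e.g. (names are illustrative only):
#
#     @image_comparison(baseline_images=["my_plot"], tol=17)
#     def test_my_plot():
#         print(ggplot(aes(x="wt", y="mpg"), data=mtcars) + geom_point())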
def cleanup(func):
"""Decorator to add cleanup to the testing function
@cleanup
def test_something():
" ... "
Note that `@cleanup` is useful *only* for test functions, not for test
methods or inside of TestCase subclasses.
"""
def _teardown():
plt.close('all')
warnings.resetwarnings() #reset any warning filters set in tests
return with_setup(setup=_setup, teardown=_teardown)(func)
# This is called from the cleanup decorator
def _setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
mpl.use('Agg', warn=False) # use Agg backend for these tests
if mpl.get_backend().lower() != "agg" and mpl.get_backend().lower() != "qt4agg":
raise Exception(("Using a wrong matplotlib backend ({0}), which will not produce proper "
"images").format(mpl.get_backend()))
# These settings *must* be hardcoded for running the comparison
# tests
mpl.rcdefaults() # Start with all defaults
mpl.rcParams['text.hinting'] = True
mpl.rcParams['text.antialiased'] = True
#mpl.rcParams['text.hinting_factor'] = 8
# Clear the font caches. Otherwise, the hinting mode can travel
# from one test to another.
backend_agg.RendererAgg._fontd.clear()
backend_pdf.RendererPdf.truetype_font_cache.clear()
backend_svg.RendererSVG.fontd.clear()
# make sure we don't carry over bad plots from former tests
assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
"python tests.py -v' and add a '@cleanup' decorator." % \
str(plt.get_fignums())
# This is here to run it like "from ggplot.tests import test; test()"
def test(verbosity=1):
"""run the ggplot test suite"""
old_backend = mpl.rcParams['backend']
try:
mpl.use('agg')
import nose
import nose.plugins.builtin
from matplotlib.testing.noseclasses import KnownFailure
from nose.plugins.manager import PluginManager
from nose.plugins import multiprocess
# store the old values before overriding
plugins = []
plugins.append( KnownFailure() )
plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )
manager = PluginManager(plugins=plugins)
config = nose.config.Config(verbosity=verbosity, plugins=manager)
# Nose doesn't automatically instantiate all of the plugins in the
# child processes, so we have to provide the multiprocess plugin with
# a list.
multiprocess._instantiate_plugins = [KnownFailure]
success = nose.run( defaultTest=default_test_modules,
config=config,
)
finally:
if old_backend.lower() != 'agg':
mpl.use(old_backend)
return success
test.__test__ = False # nose: this function is not a test
| bsd-2-clause |
seanli9jan/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 25 | 7883 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.cached_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
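# Hedged usage sketch (an addition, not part of the test file): outside of the
# tests, the same input_fn is usually handed straight to an estimator's
# training call, e.g. (the estimator below is a placeholder, not defined here):
#
#     input_fn = pandas_io.pandas_input_fn(x, y, batch_size=32,
#                                          shuffle=True, num_epochs=10)
#     some_estimator.fit(input_fn=input_fn)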
if __name__ == '__main__':
test.main()
| apache-2.0 |
will-iam/Variant | script/process/ergodicity_scaling.py | 1 | 4083 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import __future__
import parser
import sys
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import operator
from collections import *
caseSize = (8192, 8192)
if parser.args.res:
maxAvailableNode = parser.args.res
else:
maxAvailableNode = 8
sizeDataDict = []
for p in range(0, int(np.log2(maxAvailableNode)) + 1):
filterDict = {'nSizeX' : caseSize[0], 'nSizeY' : caseSize[1], 'R' : 64 * 2**p}
    print(filterDict)
data = parser.getData(filterDict)
if len(data):
sizeDataDict.append(data)
if len(sizeDataDict) == 0:
print("No data found.")
sys.exit(1)
loopTimeDict = dict()
for data in sizeDataDict:
for key, value in data.items():
keyDict = parser.extractKey(key)
Nt = keyDict['Nt']
R = keyDict['R']
if keyDict['Ny'] != caseSize[0] or keyDict['Nx'] != caseSize[1]:
print("Error in collected data")
sys.exit(1)
for run in value:
nSDD = run['point'][0] * run['point'][1]
# On several nodes, select only pure SDD, which is the best result.
if R > 64 and nSDD < R:
continue
# Don't remove HyperThreading.
# We assume that hyperthreading with SDD leads to same results as with SDS.
#if R > 64 and nSDD == R and Nt > 1.0:
# continue
# On a single node, select only pure SDS
if R == 64 and nSDD > 1:
continue
loopT = run['loopTime'] * caseSize[0] * caseSize[1] * keyDict['Ni'] / 1000.
if R not in loopTimeDict.keys():
loopTimeDict[R] = list()
loopTimeDict[R].append(loopT)
# And now, we must plot that
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
#ax = fig.add_subplot(211)
#ax.set_xscale('log', basex=2)
#ax.set_yscale('log')
maxSimulationNumber = 42
xArray = range(1, maxSimulationNumber + 1)
'''
#Perfect Scale
loopTimeDict[128] = [k / 2. for k in loopTimeDict[64]]
loopTimeDict[256] = [k / 4. for k in loopTimeDict[64]]
loopTimeDict[512] = [k / 8. for k in loopTimeDict[64]]
'''
for r in sorted(loopTimeDict):
nodeNeeded = r // 64
minT = np.min(loopTimeDict[r])
print("Min Time %s node(s) = %s" % (nodeNeeded, minT))
totalTimeArray = np.zeros(maxSimulationNumber)
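    # 1 + (i*nodeNeeded - 1)//maxAvailableNode is the ceiling of
    # i*nodeNeeded/maxAvailableNode, i.e. the number of sequential batches
    # needed to run i simulations, each batch costing minT.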
for i in xArray:
totalTimeArray[i-1] = minT * (1 + (i * nodeNeeded - 1) // maxAvailableNode)
ax.plot(xArray, totalTimeArray, '-', label="Batch Size %s" % (r // 64))
parser.outputCurve("ergodicity_scaling-%s.dat" % (r//64), xArray, totalTimeArray)
'''
minSize = int(np.sqrt(np.min(syncTimeDict.keys())))
maxSize = int(np.sqrt(np.max(syncTimeDict.keys())))
nodeNumber = (caseSize[0] * caseSize[1] / (maxSize * maxSize))
'''
plt.title('%sx%s batch time with %s node(s) available at the same time.' % (caseSize[0], caseSize[1], maxAvailableNode))
plt.xlabel('Total number of simulation to run')
plt.ylabel('Loop Time')
plt.legend()
'''
bx = fig.add_subplot(212)
bx.set_xscale('log', basex=2)
bx.plot(sorted(sdsWeakDict), [np.min(v) for k, v in sorted(sdsWeakDict.items(), key=operator.itemgetter(0))], 'g+-', label="SDS scaling")
bx.plot(sorted(sddWeakDict), [np.min(v) for k, v in sorted(sddWeakDict.items())], 'b+-', label="SDD scaling")
#bx.plot(sorted(hybridWeakDict), [np.min(v) for k, v in sorted(hybridWeakDict.items())], 'y+-', label="Hybrid scaling")
bx.plot(sorted(sddWeakDict), [firstValueSDD for k in sorted(sddWeakDict.keys())], 'b--', label="SDD ideal")
bx.plot(sorted(sdsWeakDict), [firstValueSDS for k in sorted(sdsWeakDict.keys())], 'g--', label="SDS ideal")
for k in sdsWeakDict:
bx.plot(np.full(len(sdsWeakDict[k]), k), sdsWeakDict[k], 'g+')
for k in sddWeakDict:
bx.plot(np.full(len(sddWeakDict[k]), k), sddWeakDict[k], 'b+')
plt.title('Weak Scaling from %sx%s to %sx%s' % (initSize, initSize, initSize * 2**((maxPower-1) / 2), initSize * 2**((maxPower-1) / 2)) )
plt.xlabel('Core(s)')
plt.ylabel('Loop Time / iteration')
plt.legend()
'''
plt.show()
| mit |
sgenoud/scikit-learn | sklearn/datasets/lfw.py | 6 | 16362 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
"is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=None, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person: int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split('\t') for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
person_folder = join(data_folder_path, name)
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
Parameters
----------
subset: optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home: optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize the each face picture.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
pv/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
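# Editorial note (not part of the original example): the fraction of samples
# that RANSAC retained as inliers is a useful diagnostic next to the
# coefficient comparison above.
print("Inlier fraction: %.2f" % inlier_mask.mean())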
| bsd-3-clause |
giorgiop/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value (a short editorial sketch of this
selection is appended after the plotting code below).
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
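# --- Editorial sketch (not part of the original example) ---
# The docstring notes that a higher-purity subset of class B can be built by
# keeping only samples whose decision score exceeds some value. A minimal
# illustration using the fitted `bdt` scores from above; the cutoff of 0.5 is
# arbitrary.
score_cutoff = 0.5
selected = twoclass_output > score_cutoff
print("Kept %d samples; fraction truly in class B: %.2f"
      % (selected.sum(), y[selected].mean()))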
| bsd-3-clause |
anhaidgroup/py_entitymatching | py_entitymatching/dask/dask_extract_features.py | 1 | 9597 | import logging
import os
import pandas as pd
import multiprocessing
import numpy as np
import dask
from dask.diagnostics import ProgressBar
from dask import delayed
from cloudpickle import cloudpickle
import tempfile
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.feature.extractfeatures import get_feature_vals_by_cand_split
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
def dask_extract_feature_vecs(candset, attrs_before=None, feature_table=None,
attrs_after=None, verbose=False,
show_progress=True, n_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK
This function extracts feature vectors from a DataFrame (typically a
labeled candidate set).
Specifically, this function uses feature
table, ltable and rtable (that is present in the `candset`'s
metadata) to extract feature vectors.
Args:
candset (DataFrame): The input candidate set for which the features
vectors should be extracted.
attrs_before (list): The list of attributes from the input candset,
that should be added before the feature vectors (defaults to None).
feature_table (DataFrame): A DataFrame containing a list of
features that should be used to compute the feature vectors (
defaults to None).
attrs_after (list): The list of attributes from the input candset
that should be added after the feature vectors (defaults to None).
verbose (boolean): A flag to indicate whether the debug information
should be displayed (defaults to False).
show_progress (boolean): A flag to indicate whether the progress of
extracting feature vectors must be displayed (defaults to True).
n_chunks (int): The number of partitions to split the candidate set. If it
is set to -1, the number of partitions will be set to the
number of cores in the machine.
Returns:
A pandas DataFrame containing feature vectors.
The DataFrame will have metadata ltable and rtable, pointing
to the same ltable and rtable as the input candset.
Also, the output
DataFrame will have three columns: key, foreign key ltable, foreign
key rtable copied from input candset to the output DataFrame. These
three columns precede the columns mentioned in `attrs_before`.
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `attrs_before` has attributes that
are not present in the input candset.
        AssertionError: If `attrs_after` has attributes that
are not present in the input candset.
AssertionError: If `feature_table` is set to None.
AssertionError: If `n_chunks` is not of type
int.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_extract_features import dask_extract_feature_vecs
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> match_f = em.get_features_for_matching(A, B)
>>> # G is the labeled dataframe which should be converted into feature vectors
>>> H = dask_extract_feature_vecs(G, features=match_f, attrs_before=['title'], attrs_after=['gold_labels'])
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# Validate input parameters
# # We expect the input candset to be of type pandas DataFrame.
validate_object_type(candset, pd.DataFrame, error_prefix='Input cand.set')
# # If the attrs_before is given, Check if the attrs_before are present in
# the input candset
    if attrs_before is not None:
if not ch.check_attrs_present(candset, attrs_before):
logger.error(
'The attributes mentioned in attrs_before is not present '
'in the input table')
raise AssertionError(
'The attributes mentioned in attrs_before is not present '
'in the input table')
# # If the attrs_after is given, Check if the attrs_after are present in
# the input candset
    if attrs_after is not None:
if not ch.check_attrs_present(candset, attrs_after):
logger.error(
'The attributes mentioned in attrs_after is not present '
'in the input table')
raise AssertionError(
'The attributes mentioned in attrs_after is not present '
'in the input table')
# We expect the feature table to be a valid object
if feature_table is None:
logger.error('Feature table cannot be null')
raise AssertionError('The feature table cannot be null')
# Do metadata checking
# # Mention what metadata is required to the user
ch.log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, '
'ltable, rtable, ltable key, rtable key', verbose)
# # Get metadata
ch.log_info(logger, 'Getting metadata from catalog', verbose)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
cm.get_metadata_for_candset(
candset, logger, verbose)
# # Validate metadata
ch.log_info(logger, 'Validating metadata', verbose)
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# Extract features
# id_list = [(row[fk_ltable], row[fk_rtable]) for i, row in
# candset.iterrows()]
# id_list = [tuple(tup) for tup in candset[[fk_ltable, fk_rtable]].values]
# # Set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
# # Apply feature functions
ch.log_info(logger, 'Applying feature functions', verbose)
col_names = list(candset.columns)
fk_ltable_idx = col_names.index(fk_ltable)
fk_rtable_idx = col_names.index(fk_rtable)
validate_object_type(n_chunks, int, 'Parameter n_chunks')
validate_chunks(n_chunks)
n_chunks = get_num_partitions(n_chunks, len(candset))
c_splits = np.array_split(candset, n_chunks)
pickled_obj = cloudpickle.dumps(feature_table)
feat_vals_by_splits = []
for i in range(len(c_splits)):
partial_result = delayed(get_feature_vals_by_cand_split)(pickled_obj,
fk_ltable_idx,
fk_rtable_idx, l_df,
r_df, c_splits[i],
False)
feat_vals_by_splits.append(partial_result)
feat_vals_by_splits = delayed(wrap)(feat_vals_by_splits)
if show_progress:
with ProgressBar():
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
feat_vals = sum(feat_vals_by_splits, [])
# Construct output table
feature_vectors = pd.DataFrame(feat_vals, index=candset.index.values)
# # Rearrange the feature names in the input feature table order
feature_names = list(feature_table['feature_name'])
feature_vectors = feature_vectors[feature_names]
ch.log_info(logger, 'Constructing output table', verbose)
# print(feature_vectors)
# # Insert attrs_before
if attrs_before:
if not isinstance(attrs_before, list):
attrs_before = [attrs_before]
attrs_before = gh.list_diff(attrs_before, [key, fk_ltable, fk_rtable])
attrs_before.reverse()
for a in attrs_before:
feature_vectors.insert(0, a, candset[a])
# # Insert keys
feature_vectors.insert(0, fk_rtable, candset[fk_rtable])
feature_vectors.insert(0, fk_ltable, candset[fk_ltable])
feature_vectors.insert(0, key, candset[key])
# # insert attrs after
if attrs_after:
if not isinstance(attrs_after, list):
attrs_after = [attrs_after]
attrs_after = gh.list_diff(attrs_after, [key, fk_ltable, fk_rtable])
attrs_after.reverse()
col_pos = len(feature_vectors.columns)
for a in attrs_after:
feature_vectors.insert(col_pos, a, candset[a])
col_pos += 1
# Reset the index
# feature_vectors.reset_index(inplace=True, drop=True)
# # Update the catalog
cm.init_properties(feature_vectors)
cm.copy_properties(candset, feature_vectors)
# Finally, return the feature vectors
return feature_vectors
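# --- Editorial sketch (not part of the original module) ---
# The parallel pattern used above is: split the candidate set into n_chunks
# partitions with np.array_split, wrap the per-partition work in dask.delayed,
# and reduce the partial results with one more delayed call before computing.
# A minimal, self-contained toy version of that pattern; `_row_sums` is a
# hypothetical stand-in for get_feature_vals_by_cand_split.
if __name__ == '__main__':
    def _row_sums(chunk):
        # per-chunk work: one number per row of the chunk
        return [float(row.sum()) for _, row in chunk.iterrows()]

    _toy = pd.DataFrame(np.arange(20).reshape(10, 2), columns=['a', 'b'])
    _parts = np.array_split(_toy, 4)
    _partials = [delayed(_row_sums)(p) for p in _parts]
    # concatenate the per-chunk lists, mirroring sum(feat_vals_by_splits, [])
    _result = delayed(lambda parts: sum(parts, []))(_partials).compute(
        scheduler='threads')
    print(_result)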
| bsd-3-clause |
sinkpoint/dipy | scratch/very_scratch/simulation_comparisons_modified.py | 20 | 13117 | import nibabel
import os
import numpy as np
import dipy as dp
import dipy.core.generalized_q_sampling as dgqs
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' The file has one row per voxel; each voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so the file has 1000 * 100 rows in total.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, each combined with the longitude angles 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = dp.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' The file has one row per voxel; each voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so the file has 100 * 1000 rows in total.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
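# Editorial worked illustration of the modulo-181 mapping described above (not
# original code): on the EDS362 mesh, vertex 200 is taken to be the antipode of
# vertex 200 - 181 = 19, so np.remainder(200, 181) == 19 and both indices count
# as the same maximal direction.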
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
gqfile = simdir+'gq/'+dataname+'.pkl'
gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
#print np.vstack(dt_results).shape, np.vstack(gq_results).shape
results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#print results.shape
#results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
run_comparisons()
#run_gq_sims()
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
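# Editorial note (not part of the original example): the same flattening rule
# applies to any single 8x8 image, e.g. classifying only the last digit:
print("Single-image prediction:", classifier.predict(digits.images[-1].reshape(1, -1)))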
| bsd-3-clause |
abalckin/cwavenet | examples/WNvsPWN/show_snr.py | 2 | 2454 | #! /usr/bin/python3
import pylab as plb
import numpy as np
from matplotlib import rc
rc('text', usetex=True)
rc('text.latex', unicode=True)
rc('text.latex', preamble=r'\usepackage[russian]{babel}')
#rc('font',**{'family':'serif'})
rc('font',**{'size':'19'})
res = np.loadtxt('result.txt', delimiter=', ')[0:7]
#import pdb; pdb.set_trace()
#plb.barh(y_pos, performance, xerr=error, align='center', alpha=0.4)
#plb.yscale('linear')
plb.errorbar(res[:, 1], res[:, 5], yerr=res[:, 6], label='Traditional wavelet network', linestyle='--', marker='*', color='black')
plb.errorbar(res[:, 1], res[:, 11], yerr=res[:, 12], label='Polymorphic wavelet network', marker='o', color='green')
plb.errorbar(res[:, 1], res[:, 1], yerr=res[:, 2], label='Signal-to-noise ratio of the time series $d(t), S$', color='blue')
#import pdb; pdb.set_trace()
plb.fill_between(res[:, 1], res[:, 1], res[:, 1]-np.max(res[:, 1]), res[:, 1], alpha=0.1, color='blue')
plb.xscale('log')
plb.legend(loc=0)
plb.xlim(res[-1, 1]-0.1, res[0, 1]+20)
plb.ylim(0, 670)
plb.gca().set_xticks(res[:, 1])
#plb.gca().xaxis.set_major_locator(plb.LogLocator(numticks=50))
plb.gca().xaxis.set_major_formatter(plb.ScalarFormatter())
plb.ylabel('Signal-to-noise ratio of the time series $\hat{y}(t), M$')
plb.xlabel('Signal-to-noise ratio of the time series $d(t), S$')
plb.annotate('Applicability region of wavelet networks', [7, 310])
plb.show()
polym_higest=res[:, 11]>res[:, 1]
polym_avg=res[polym_higest, 11][1:-2]
std_higest=res[:, 5]>res[:, 1]
std_avg=res[std_higest, 5][:-2]
inp_avg=res[std_higest, 1][:-2]
polym_min=res[polym_higest, 11][1:-2]-res[polym_higest, 12][1:-2]
polym_max=res[polym_higest, 11][1:-2]+res[polym_higest, 12][1:-2]
std_min=res[std_higest, 5][:-2]-res[std_higest, 6][:-2]
std_max=res[std_higest, 5][:-2]+res[std_higest, 6][:-2]
print('Average improvement: {}%'.format(np.average((polym_avg-std_avg)/std_avg*100)))
print('Improvement over the range: {0}-{1}%'.format(np.average((polym_min-std_min)/std_min*100),
                                                    np.average((polym_max-std_max)/std_max*100)))
polym_avg_db=10*np.log10(polym_avg-inp_avg)
std_avg_db=10*np.log10(std_avg-inp_avg)
print('Average improvement: {} dB'.format(np.average(polym_avg_db-std_avg_db)))
| gpl-2.0 |
linsalrob/EdwardsLab | phage_protein_blast_genera/tax_violin_plots.py | 1 | 2239 | """
"""
import os
import sys
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
    parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py)', default='/home/redwards/Desktop/gav_all_host.out')
parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'}
col = None
colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6}
if args.n not in colkey:
sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys()))))
sys.exit(-1)
col = colkey[args.n]
want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'}
data = {}
with open(args.f, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
if p[2] not in want:
p[2] = 'All phages'
#continue ## comment or uncomment this to include/exclude all data
if p[2] not in data:
data[p[2]] = []
data[p[2]].append(float(p[col]))
labels = sorted(data.keys())
scores = []
count = 1
ticks = []
for l in labels:
scores.append(data[l])
ticks.append(count)
count += 1
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.boxplot(alldata)
vp = ax.violinplot(scores, showmeans=True)
for i, j in enumerate(vp['bodies']):
if i == 0:
j.set_color('gray')
elif i == 1:
j.set_color('sandybrown')
else:
j.set_color('lightpink')
ax.set_xlabel("Body Site")
ax.set_ylabel("Average number of {}".format(ynames[args.n]))
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation='vertical')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.tight_layout()
#plt.show()
fig.savefig("/home/redwards/Desktop/bodysites.png")
| mit |
bmazin/ARCONS-pipeline | fluxcal/fluxCal.py | 1 | 29931 | #!/bin/python
'''
fluxCal.py
Created by Seth Meeker on 11-21-2012
Modified on 02-16-2015 to perform absolute fluxCal with point sources
Opens ARCONS observation of a spectrophotometric standard star and
associated wavelength cal file, reads in all photons and converts to energies.
Bins photons to generate a spectrum, then divides this into the known spectrum
of the object to create a Sensitivity curve. This curve is then written out to
h5 file.
Flags are associated with each pixel - see headers/pipelineFlags
for descriptions. Note some flags are set here, others are set
later on when creating photon lists.
'''
import sys,os
import tables
import numpy as np
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
import matplotlib.pyplot as plt
from photometry import LightCurve
from util.FileName import FileName
from util.ObsFile import ObsFile
from util import MKIDStd
from util.readDict import readDict
from util.utils import rebin
from util.utils import gaussianConvolution
from util.utils import makeMovie
from util.utils import fitBlackbody
import hotpix.hotPixels as hp
from scipy.optimize.minpack import curve_fit
from scipy import interpolate
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from headers import pipelineFlags
import figureHeader
class FluxCal:
def __init__(self,paramFile,plots=False,verbose=False):
"""
Opens flux file, prepares standard spectrum, and calculates flux factors for the file.
Method is provided in param file. If 'relative' is selected, an obs file with standard star defocused over
the entire array is expected, with accompanying sky file to do sky subtraction.
If any other method is provided, 'absolute' will be done by default, wherein a point source is assumed
to be present. The obs file is then broken into spectral frames with photometry (psf or aper) performed
on each frame to generate the ARCONS observed spectrum.
"""
self.verbose=verbose
self.plots = plots
self.params = readDict()
self.params.read_from_file(paramFile)
run = self.params['run']
sunsetDate = self.params['fluxSunsetLocalDate']
self.fluxTstamp = self.params['fluxTimestamp']
skyTstamp = self.params['skyTimestamp']
wvlSunsetDate = self.params['wvlCalSunsetLocalDate']
wvlTimestamp = self.params['wvlCalTimestamp']
flatCalFileName = self.params['flatCalFileName']
needTimeAdjust = self.params['needTimeAdjust']
self.deadtime = float(self.params['deadtime']) #from firmware pulse detection
self.timeSpacingCut = self.params['timeSpacingCut']
bLoadBeammap = self.params.get('bLoadBeammap',False)
self.method = self.params['method']
self.objectName = self.params['object']
self.r = float(self.params['energyResolution'])
self.photometry = self.params['photometry']
self.centroidRow = self.params['centroidRow']
self.centroidCol = self.params['centroidCol']
self.aperture = self.params['apertureRad']
self.annulusInner = self.params['annulusInner']
self.annulusOuter = self.params['annulusOuter']
self.collectingArea = self.params['collectingArea']
self.startTime = self.params['startTime']
self.intTime = self.params['integrationTime']
fluxFN = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp)
self.fluxFileName = fluxFN.obs()
self.fluxFile = ObsFile(self.fluxFileName)
if self.plots:
self.plotSavePath = os.environ['MKID_PROC_PATH']+os.sep+'fluxCalSolnFiles'+os.sep+run+os.sep+sunsetDate+os.sep+'plots'+os.sep
if not os.path.exists(self.plotSavePath): os.mkdir(self.plotSavePath)
if self.verbose: print "Created directory %s"%self.plotSavePath
obsFNs = [fluxFN]
self.obsList = [self.fluxFile]
if self.startTime in ['',None]: self.startTime=0
if self.intTime in ['',None]: self.intTime=-1
if self.method=="relative":
try:
print "performing Relative Flux Calibration"
skyFN = FileName(run=run,date=sunsetDate,tstamp=skyTstamp)
self.skyFileName = skyFN.obs()
self.skyFile = ObsFile(self.skyFileName)
obsFNs.append(skyFN)
self.obsList.append(self.skyFile)
except:
print "For relative flux calibration a sky file must be provided in param file"
self.__del__()
else:
self.method='absolute'
print "performing Absolute Flux Calibration"
if self.photometry not in ['aperture','PSF']: self.photometry='PSF' #default to PSF fitting if no valid photometry selected
timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
timeAdjustFileName = FileName(run=run).timeAdjustments()
#make filename for output fluxCalSoln file
self.fluxCalFileName = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp).fluxSoln()
print "Creating flux cal: %s"%self.fluxCalFileName
if wvlSunsetDate != '':
wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
if flatCalFileName =='':
flatCalFileName=FileName(obsFile=self.fluxFile).flatSoln()
#load cal files for flux file and, if necessary, sky file
for iObs,obs in enumerate(self.obsList):
if bLoadBeammap:
print 'loading beammap',os.environ['MKID_BEAMMAP_PATH']
obs.loadBeammapFile(os.environ['MKID_BEAMMAP_PATH'])
if wvlSunsetDate != '':
obs.loadWvlCalFile(wvlCalFileName)
else:
obs.loadBestWvlCalFile()
obs.loadFlatCalFile(flatCalFileName)
obs.setWvlCutoffs(-1,-1)
if needTimeAdjust:
obs.loadTimeAdjustmentFile(timeAdjustFileName)
timeMaskFileName = timeMaskFileNames[iObs]
print timeMaskFileName
if not os.path.exists(timeMaskFileName):
print 'Running hotpix for ',obs
hp.findHotPixels(obsFile=obs,outputFileName=timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
print "Flux cal/sky file pixel mask saved to %s"%(timeMaskFileName)
obs.loadHotPixCalFile(timeMaskFileName)
if self.verbose: print "Loaded hot pixel file %s"%timeMaskFileName
#get flat cal binning information since flux cal will need to match it
self.wvlBinEdges = self.fluxFile.flatCalFile.root.flatcal.wavelengthBins.read()
self.nWvlBins = self.fluxFile.flatWeights.shape[2]
self.binWidths = np.empty((self.nWvlBins),dtype=float)
self.binCenters = np.empty((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
self.binWidths[i] = self.wvlBinEdges[i+1]-self.wvlBinEdges[i]
self.binCenters[i] = (self.wvlBinEdges[i]+(self.binWidths[i]/2.0))
if self.method=='relative':
print "Extracting ARCONS flux and sky spectra"
self.loadRelativeSpectrum()
print "Flux Spectrum loaded"
self.loadSkySpectrum()
print "Sky Spectrum loaded"
elif self.method=='absolute':
print "Extracting ARCONS point source spectrum"
self.loadAbsoluteSpectrum()
print "Loading standard spectrum"
try:
self.loadStdSpectrum(self.objectName)
except KeyError:
print "Invalid spectrum object name"
self.__del__()
sys.exit()
print "Generating sensitivity curve"
self.calculateFactors()
print "Sensitivity Curve calculated"
print "Writing fluxCal to file %s"%self.fluxCalFileName
self.writeFactors(self.fluxCalFileName)
if self.plots: self.makePlots()
print "Done"
def __del__(self):
try:
self.fluxFile.close()
self.calFile.close()
except AttributeError:#fluxFile was never defined
pass
def getDeadTimeCorrection(self, obs): #WRONG RIGHT NOW. NEEDS TO HAVE RAW COUNTS SUMMED, NOT CUBE WHICH EXCLUDES NOISE TAIL
if self.verbose: print "Making raw cube to get dead time correction"
cubeDict = obs.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=False, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
wvlBinEdges= cubeDict['wvlBinEdges']
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time = ", np.median(effIntTime)
nWvlBins=len(wvlBinEdges)-1
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#CALCULATE DEADTIME CORRECTION
#NEED TOTAL COUNTS PER SECOND FOR EACH PIXEL TO DO PROPERLY
#ASSUMES SAME CORRECTION FACTOR APPLIED FOR EACH WAVELENGTH, MEANING NO WL DEPENDANCE ON DEAD TIME EFFECT
DTCorr = np.zeros((np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for f in range(0,np.shape(cube)[2]):
#if self.verbose: print cube[:,:,f]
#if self.verbose: print '-----------------------'
DTCorr += cube[:,:,f]
#if self.verbose: print DTCorr
#if self.verbose: print '\n=====================\n'
#Correct for firmware dead time (100us in 2012 ARCONS firmware)
DTCorrNew=DTCorr/(1-DTCorr*self.deadtime)
CorrFactors = DTCorrNew/DTCorr #This is what the frames need to be multiplied by to get their true values
if self.verbose: print "Dead time correction factors: ", CorrFactors
#add third dimension to CorrFactors for broadcasting
CorrFactors = np.reshape(CorrFactors,np.shape(CorrFactors)+(1,))
return CorrFactors
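        # Editorial worked example (not original code): with the 100 us
        # firmware dead time and a pixel observing 2000 counts/s, the
        # correction above gives 2000 / (1 - 2000 * 1e-4) = 2500 counts/s,
        # i.e. a correction factor of 1.25 applied to every wavelength slice
        # of that pixel.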
def loadAbsoluteSpectrum(self):
'''
extract the ARCONS measured spectrum of the spectrophotometric standard by breaking data into spectral cube
and performing photometry (aper or psf) on each spectral frame
'''
if self.verbose:print "Making spectral cube"
cubeDict = self.fluxFile.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=True, fluxWeighted=False)
cube= np.array(cubeDict['cube'], dtype=np.double)
effIntTime= cubeDict['effIntTime']
if self.verbose: print "median effective integration time in flux file cube = ", np.median(effIntTime)
if self.verbose: print "cube shape ", np.shape(cube)
if self.verbose: print "effIntTime shape ", np.shape(effIntTime)
#add third dimension to effIntTime for broadcasting
effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
#put cube into counts/s in each pixel
cube /= effIntTime
#get dead time correction factors
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
cube*=DTCorr #cube now in units of counts/s and corrected for dead time
if self.plots and not 'figureHeader' in sys.modules:
if self.verbose: print "Saving spectral frames as movie..."
movieCube = np.zeros((self.nWvlBins,np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
for i in xrange(self.nWvlBins):
movieCube[i,:,:] = cube[:,:,i]
makeMovie(movieCube,frameTitles=self.binCenters,cbar=True,outName=self.plotSavePath+'FluxCal_Cube_%s.gif'%(self.objectName), normMin=0, normMax=50)
if self.verbose: print "Movie saved in %s"%self.plotSavePath
LCplot=False #light curve pop-ups not compatible with FLuxCal plotting 2/18/15
#if self.photometry=='PSF': LCplot = False
LC = LightCurve.LightCurve(verbose=self.verbose, showPlot=LCplot)
self.fluxSpectrum=np.empty((self.nWvlBins),dtype=float)
self.skySpectrum=np.zeros((self.nWvlBins),dtype=float)
for i in xrange(self.nWvlBins):
frame = cube[:,:,i]
if self.verbose: print "%s photometry on frame %i of cube, central wvl = %f Angstroms"%(self.photometry,i,self.binCenters[i])
if self.photometry == 'aperture':
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture, annulus_inner = self.annulusInner, annulus_outer = self.annulusOuter, interpolation="linear")
self.fluxSpectrum[i] = fDict['flux']
self.skySpectrum[i] = fDict['skyFlux']
print "Sky estimate = ", fDict['skyFlux']
else:
fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture)
self.fluxSpectrum[i] = fDict['flux']
self.fluxSpectrum=self.fluxSpectrum/self.binWidths/self.collectingArea #spectrum now in counts/s/Angs/cm^2
self.skySpectrum=self.skySpectrum/self.binWidths/self.collectingArea
return self.fluxSpectrum, self.skySpectrum
def loadRelativeSpectrum(self):
self.fluxSpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.fluxEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.fluxFile.getPixelCount(iRow,iCol)
fluxDict = self.fluxFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.fluxSpectra[iRow][iCol],self.fluxEffTime[iRow][iCol] = fluxDict['spectrum'],fluxDict['effIntTime']
self.fluxSpectra = np.array(self.fluxSpectra)
self.fluxEffTime = np.array(self.fluxEffTime)
DTCorr = self.getDeadTimeCorrection(self.fluxFile)
#print "Bin widths = ",self.binWidths
self.fluxSpectra = self.fluxSpectra/self.binWidths/self.fluxEffTime*DTCorr
self.fluxSpectrum = self.calculateMedian(self.fluxSpectra) #find median of subtracted spectra across whole array
return self.fluxSpectrum
def loadSkySpectrum(self):
self.skySpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
self.skyEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
count = self.skyFile.getPixelCount(iRow,iCol)
skyDict = self.skyFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
self.skySpectra[iRow][iCol],self.skyEffTime[iRow][iCol] = skyDict['spectrum'],skyDict['effIntTime']
self.skySpectra = np.array(self.skySpectra)
self.skyEffTime = np.array(self.skyEffTime)
DTCorr = self.getDeadTimeCorrection(self.skyFile)
self.skySpectra = self.skySpectra/self.binWidths/self.skyEffTime*DTCorr
self.skySpectrum = self.calculateMedian(self.skySpectra) #find median of subtracted spectra across whole array
return self.skySpectrum
def loadStdSpectrum(self, objectName="G158-100"):
#import the known spectrum of the calibrator and rebin to the histogram parameters given
#must be imported into array with dtype float so division later does not have error
std = MKIDStd.MKIDStd()
a = std.load(objectName)
a = std.countsToErgs(a) #convert std spectrum to ergs/s/Angs/cm^2 for BB fitting and cleaning
self.stdWvls = np.array(a[:,0])
self.stdFlux = np.array(a[:,1]) #std object spectrum in ergs/s/Angs/cm^2
if self.plots:
#create figure for plotting standard spectrum modifications
self.stdFig = plt.figure()
self.stdAx = self.stdFig.add_subplot(111)
plt.xlim(3500,12000)
plt.plot(self.stdWvls,self.stdFlux*1E15,linewidth=1,color='grey',alpha=0.75)
convX_rev,convY_rev = self.cleanSpectrum(self.stdWvls,self.stdFlux)
convX = convX_rev[::-1] #convolved spectrum comes back sorted backwards, from long wvls to low which screws up rebinning
convY = convY_rev[::-1]
#rebin cleaned spectrum to flat cal's wvlBinEdges
newa = rebin(convX,convY,self.wvlBinEdges)
rebinnedWvl = np.array(newa[:,0])
rebinnedFlux = np.array(newa[:,1])
if self.plots:
#plot final resampled spectrum
plt.plot(convX,convY*1E15,color='blue')
plt.step(rebinnedWvl,rebinnedFlux*1E15,color = 'black',where='mid')
plt.legend(['%s Spectrum'%self.objectName,'Blackbody Fit','Gaussian Convolved Spectrum','Rebinned Spectrum'],'upper right', numpoints=1)
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Flux (10$^{-15}$ ergs s$^{-1}$ cm$^{-2}$ \r{A}$^{-1}$)")
plt.ylim(0.9*min(rebinnedFlux)*1E15, 1.1*max(rebinnedFlux)*1E15)
plt.savefig(self.plotSavePath+'FluxCal_StdSpectrum_%s.eps'%self.objectName,format='eps')
#convert standard spectrum back into counts/s/angstrom/cm^2
newa = std.ergsToCounts(newa)
self.binnedSpectrum = np.array(newa[:,1])
def cleanSpectrum(self,x,y):
##=============== BB Fit to extend spectrum beyond 11000 Angstroms ==================
fraction = 1.0/3.0
nirX = np.arange(int(x[(1.0-fraction)*len(x)]),20000)
T, nirY = fitBlackbody(x,y,fraction=fraction,newWvls=nirX,tempGuess=5600)
if self.plots: plt.plot(nirX,nirY*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5)
extendedWvl = np.concatenate((x,nirX[nirX>max(x)]))
extendedFlux = np.concatenate((y,nirY[nirX>max(x)]))
##======= Gaussian convolution to smooth std spectrum to MKIDs median resolution ========
newX, newY = gaussianConvolution(extendedWvl,extendedFlux,xEnMin=0.005,xEnMax=6.0,xdE=0.001,fluxUnits = "lambda",r=self.r,plots=False)
return newX, newY
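        # Editorial worked example (not original code): for an energy
        # resolution R = E/dE of 7 at 4500 Angstroms (E ~ 12398.4/4500 ~ 2.76 eV),
        # the convolution width is dE = E/R ~ 0.39 eV, which matches the
        # dE = 0.3936 eV hard-coded in the deprecated cleanSpectrum_old
        # routine below.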
def calculateFactors(self):
"""
Calculate the sensitivity spectrum: the weighting factors that correct the flat calibrated spectra to the real spectra
For relative calibration:
First subtract sky spectrum from ARCONS observed spectrum. Then take median of this spectrum as it should be identical
across the array, assuming the flat cal has done its job. Then divide this into the known spectrum of the object.
For absolute calibration:
self.fluxSpectra already has sky subtraction included. Simply divide this spectrum into the known standard spectrum.
"""
self.subtractedSpectrum = self.fluxSpectrum - self.skySpectrum
self.subtractedSpectrum = np.array(self.subtractedSpectrum,dtype=float) #cast as floats so division does not fail later
if self.method=='relative':
normWvl = 5500 #Angstroms. Choose an arbitrary wvl to normalize the relative correction at
ind = np.where(self.wvlBinEdges >= normWvl)[0][0]-1
self.subtractedSpectrum = self.subtractedSpectrum/(self.subtractedSpectrum[ind]) #normalize
self.binnedSpectrum = self.binnedSpectrum/(self.binnedSpectrum[ind]) #normalize treated Std spectrum while we are at it
#Calculate FluxCal factors
self.fluxFactors = self.binnedSpectrum/self.subtractedSpectrum
#self.fluxFlags = np.zeros(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags = np.empty(np.shape(self.fluxFactors),dtype='int')
self.fluxFlags.fill(pipelineFlags.fluxCal['good']) #Initialise flag array filled with 'good' flags. JvE 5/1/2013.
#set factors that will cause trouble to 1
#self.fluxFlags[self.fluxFactors == np.inf] = 1
self.fluxFlags[self.fluxFactors == np.inf] = pipelineFlags.fluxCal['infWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors == np.inf]=1.0
self.fluxFlags[np.isnan(self.fluxFactors)] = pipelineFlags.fluxCal['nanWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[np.isnan(self.fluxFactors)]=1.0
self.fluxFlags[self.fluxFactors <= 0]=pipelineFlags.fluxCal['LEzeroWeight'] #Modified to use flag dictionary - JvE 5/1/2013
self.fluxFactors[self.fluxFactors <= 0]=1.0
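        # Editorial worked example (not original code): a flux factor of, say,
        # 40 in a given wavelength bin means the flat-fielded, sky-subtracted
        # ARCONS count rate in that bin must be multiplied by 40 to recover
        # the standard's known spectrum; equivalently the throughput in that
        # bin is 1/40 = 0.025, which is what makePlots() draws as
        # "1/Sensitivity (Throughput)".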
def calculateMedian(self, spectra):
spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins])
wvlMedian = np.empty(self.nWvlBins,dtype=float)
for iWvl in xrange(self.nWvlBins):
spectrum = spectra2d[:,iWvl]
goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians
wvlMedian[iWvl] = np.median(goodSpectrum)
return wvlMedian
def makePlots(self):
"""
Output all debugging plots of ARCONS sky and object spectra, known calibrator spectrum, and sensitivity curve
"""
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = self.plotSavePath
fluxCalBase = 'FluxCal_%s'%self.objectName
plotFileName = fluxCalBase+".pdf"
fullFluxPlotFileName = os.path.join(fluxDir,plotFileName)
#uncomment to make some plots for the paper. Proper formatting Will also require figureheader to be imported and for movie making to be turned off
self.paperFig = plt.figure()
self.paperAx = self.paperFig.add_subplot(111)
plt.xlim(4000,11000)
plt.plot(self.binCenters,self.fluxFactors,linewidth=3,color='black')
plt.xlabel(ur"Wavelength (\r{A})")
plt.ylabel(ur"Spectral Calibration Curve")
plt.ylim(0,150)
plt.savefig(self.plotSavePath+'FluxCal_Sensitivity_%s.eps'%self.objectName,format='eps')
#save throughput as a .npz file that other code uses when making paper plots
np.savez(self.plotSavePath+'%s_%s_throughput.npz'%(self.objectName.strip(),self.fluxTstamp),throughput=1.0/self.fluxFactors,wvls=self.binCenters)
pp = PdfPages(fullFluxPlotFileName)
#plt.rcParams['font.size'] = 2
wvls = self.binCenters
plt.figure()
ax1 = plt.subplot(111)
ax1.set_title('ARCONS median flat cal\'d flux in counts')
plt.plot(wvls,self.fluxSpectrum)
pp.savefig()
plt.figure()
ax2 = plt.subplot(111)
ax2.set_title('ARCONS median flat cal\'d sky in counts')
plt.plot(wvls,self.skySpectrum)
pp.savefig()
plt.figure()
ax3 = plt.subplot(111)
ax3.set_title('Flux data minus sky in counts')
plt.plot(wvls,self.subtractedSpectrum)
pp.savefig()
plt.figure()
ax4 = plt.subplot(111)
ax4.set_title('Std Spectrum of %s'%(self.objectName))
plt.plot(self.stdWvls,self.stdFlux)
pp.savefig()
plt.figure()
ax5 = plt.subplot(111)
ax5.set_title('Binned Std Spectrum')
plt.plot(wvls,self.binnedSpectrum)
pp.savefig()
plt.figure()
ax6 = plt.subplot(111)
ax6.set_title('Median Sensitivity Spectrum')
ax6.set_xlim((3500,12000))
#ax6.set_ylim((0,5))
plt.plot(wvls,self.fluxFactors)
pp.savefig()
plt.figure()
ax7 = plt.subplot(111)
ax7.set_title('1/Sensitivity (Throughput)')
ax7.set_xlim((3500,12000))
ax7.set_ylim((0,.04))
plt.plot(wvls,1.0/self.fluxFactors)
pp.savefig()
plt.figure()
ax8 = plt.subplot(111)
ax8.set_title('Flux Cal\'d ARCONS Spectrum of Std')
plt.plot(wvls,self.fluxFactors*self.subtractedSpectrum)
pp.savefig()
pp.close()
print "Saved Flux Cal plots to %s"%(fullFluxPlotFileName)
def writeFactors(self,fluxCalFileName):
"""
Write flux cal weights to h5 file
"""
if os.path.isabs(fluxCalFileName) == True:
fullFluxCalFileName = fluxCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = os.path.join(scratchDir,'fluxCalSolnFiles')
fullFluxCalFileName = os.path.join(fluxDir,fluxCalFileName)
try:
fluxCalFile = tables.openFile(fullFluxCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flux cal file, ',fullFluxCalFileName
return
calgroup = fluxCalFile.createGroup(fluxCalFile.root,'fluxcal','Table of flux calibration weights by wavelength')
caltable = tables.Array(calgroup,'weights',object=self.fluxFactors,title='Flux calibration Weights indexed by wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.fluxFlags,title='Flux cal flags indexed by wavelengthBin. 0 is Good')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
fluxCalFile.flush()
fluxCalFile.close()
print "Finished Flux Cal, written to %s"%(fullFluxCalFileName)
def cleanSpectrum_old(self,x,y,objectName):
'''
function to take high resolution spectrum of standard star, extend IR coverage with
an exponential tail, then rebin down to ARCONS resolution. This function has since been
deprecated with the current cleanSpectrum which uses a BB fit to extend IR coverage,
and does the rebinning using a gaussian convolution. This is left in for reference.
'''
#locations and widths of absorption features in Angstroms
#features = [3890,3970,4099,4340,4860,6564,6883,7619]
#widths = [50,50,50,50,50,50,50,50]
#for i in xrange(len(features)):
# #check for absorption feature in std spectrum
# ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0]
# if len(ind)!=0:
# ind = ind[len(ind)/2]
# #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be)
# if y[ind]<y[ind+1] and y[ind]<y[ind-1]:
# #cut out width[i] around feature[i]
# inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i]))
# x = x[inds]
# y = y[inds]
#fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms
fraction = 3.0/4.0
newx = np.arange(int(x[fraction*len(x)]),20000)
slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)])
print "Guess at exponential slope is %f"%(slopeguess)
guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess
guess = [guess_a, guess_b, guess_c]
fitx = x[fraction*len(x):]
fity = y[fraction*len(x):]
exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t)
params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000)
A, x0, t= params
print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t)
best_fit = lambda fx: A * np.exp((fx-x0)*t)
calcx = np.array(newx,dtype=float)
newy = best_fit(calcx)
        #func = interpolate.splrep(x[fraction*len(x):],y[fraction*len(x):],s=smooth)
#newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1])
#newy = interpolate.splev(newx,func)
wl = np.concatenate((x,newx[newx>max(x)]))
flux = np.concatenate((y,newy[newx>max(x)]))
#new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins
#R=7.0 at 4500
        #R=E/dE -> dE = E/R
dE = 0.3936 #eV
start = 1000 #Angs
stop = 20000 #Angs
enBins = ObsFile.makeWvlBins(dE,start,stop)
rebinned = rebin(wl,flux,enBins)
re_wl = rebinned[:,0]
re_flux = rebinned[:,1]
#plt.plot(re_wl,re_flux,color='r')
re_wl = re_wl[np.isnan(re_flux)==False]
re_flux = re_flux[np.isnan(re_flux)==False]
start1 = self.wvlBinEdges[0]
stop1 = self.wvlBinEdges[-1]
#regrid downsampled data
new_wl = np.arange(start1,stop1)
#print re_wl
#print re_flux
#print new_wl
#weight=1.0/(re_flux)**(2/1.00)
print len(re_flux)
weight = np.ones(len(re_flux))
#decrease weights near peak
ind = np.where(re_flux == max(re_flux))[0]
weight[ind] = 0.3
for p in [1,2,3]:
if p==1:
wt = 0.3
elif p==2:
wt = 0.6
elif p==3:
wt = 0.7
try:
weight[ind+p] = wt
except IndexError:
pass
try:
if ind-p >= 0:
weight[ind-p] = wt
except IndexError:
pass
weight[-4:] = 1.0
#weight = [0.7,1,0.3,0.3,0.5,0.7,1,1,1]
#print len(weight)
#weight = re_flux/min(re_flux)
#weight = 1.0/weight
#weight = weight/max(weight)
#print weight
f = interpolate.splrep(re_wl,re_flux,w=weight,k=3,s=max(re_flux)**1.71)
new_flux = interpolate.splev(new_wl,f,der=0)
return new_wl, new_flux
if __name__ == '__main__':
try:
paramFile = sys.argv[1]
except:
paramFile = '/home/srmeeker/ARCONS-pipeline/params/fluxCal.dict'
fc = FluxCal(paramFile, plots=True, verbose=True)
| gpl-2.0 |
nomadcube/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
shikhar413/openmc | tests/regression_tests/diff_tally/test.py | 10 | 4122 | import glob
import os
import pandas as pd
import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
class DiffTallyTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set settings explicitly
self._model.settings.batches = 3
self._model.settings.inactive = 0
self._model.settings.particles = 100
self._model.settings.source = openmc.Source(space=openmc.stats.Box(
[-160, -160, -183], [160, 160, 183]))
self._model.settings.temperature['multipole'] = True
filt_mats = openmc.MaterialFilter((1, 3))
filt_eout = openmc.EnergyoutFilter((0.0, 0.625, 20.0e6))
# We want density derivatives for both water and fuel to get coverage
# for both fissile and non-fissile materials.
d1 = openmc.TallyDerivative(derivative_id=1)
d1.variable = 'density'
d1.material = 3
d2 = openmc.TallyDerivative(derivative_id=2)
d2.variable = 'density'
d2.material = 1
# O-16 is a good nuclide to test against because it is present in both
# water and fuel. Some routines need to recognize that they have the
# perturbed nuclide but not the perturbed material.
d3 = openmc.TallyDerivative(derivative_id=3)
d3.variable = 'nuclide_density'
d3.material = 1
d3.nuclide = 'O16'
# A fissile nuclide, just for good measure.
d4 = openmc.TallyDerivative(derivative_id=4)
d4.variable = 'nuclide_density'
d4.material = 1
d4.nuclide = 'U235'
# Temperature derivatives.
d5 = openmc.TallyDerivative(derivative_id=5)
d5.variable = 'temperature'
d5.material = 1
derivs = [d1, d2, d3, d4, d5]
# Cover the flux score.
for i in range(5):
t = openmc.Tally()
t.scores = ['flux']
t.filters = [filt_mats]
t.derivative = derivs[i]
self._model.tallies.append(t)
# Cover supported scores with a collision estimator.
for i in range(5):
t = openmc.Tally()
t.scores = ['total', 'absorption', 'scatter', 'fission', 'nu-fission']
t.filters = [filt_mats]
t.nuclides = ['total', 'U235']
t.derivative = derivs[i]
self._model.tallies.append(t)
# Cover an analog estimator.
for i in range(5):
t = openmc.Tally()
t.scores = ['absorption']
t.filters = [filt_mats]
t.estimator = 'analog'
t.derivative = derivs[i]
self._model.tallies.append(t)
# Energyout filter and total nuclide for the density derivatives.
for i in range(2):
t = openmc.Tally()
t.scores = ['nu-fission', 'scatter']
t.filters = [filt_mats, filt_eout]
t.nuclides = ['total', 'U235']
t.derivative = derivs[i]
self._model.tallies.append(t)
# Energyout filter without total nuclide for other derivatives.
for i in range(2, 5):
t = openmc.Tally()
t.scores = ['nu-fission', 'scatter']
t.filters = [filt_mats, filt_eout]
t.nuclides = ['U235']
t.derivative = derivs[i]
self._model.tallies.append(t)
def _get_results(self):
# Read the statepoint and summary files.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
# Extract the tally data as a Pandas DataFrame.
df = pd.DataFrame()
for t in sp.tallies.values():
df = df.append(t.get_pandas_dataframe(), ignore_index=True)
# Extract the relevant data as a CSV string.
cols = ('d_material', 'd_nuclide', 'd_variable', 'score', 'mean',
'std. dev.')
return df.to_csv(None, columns=cols, index=False, float_format='%.7e')
def test_diff_tally():
harness = DiffTallyTestHarness('statepoint.3.h5')
harness.main()
| mit |
cgheller/splotch | labeltool/splotchColormap.py | 4 | 5402 | #!/usr/bin/env python
# Generate overlay images in PNG format with transparency, which can be used
# used to label Splotch frames. This script can be called as a
# standalone program, see below for details. To label an entire
# directory of Splotch frames, use the driver script <splotchLabelFrames.sh>.
#
# (Klaus Reuter, RZG, Sep 2011)
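#
# A hypothetical standalone invocation (the file names and numbers below are
# placeholders, not taken from any real pipeline); the flags match the getopt
# handling at the bottom of this script, and the colormin/colormax arguments
# are base-10 exponents because the script passes them through pow(10, x):
#
#   ./splotchColormap.py --time 1.25 --redshift 0.5 \
#       --colormin 0 --colormax 4 --outfile overlay_0001.png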
def splotchColormap(time=-1.0, # for time>0, a time stamp is printed in the upper left corner
redshift=-1.0, # for redshift>0, a redshift stamp is printed
valMin=0.1, # minimum value for the log colorscale
valMax=1.e4, # maximum value for the log colorscale
outfile="overlay.png", # default file name of the overlay to be created
xinches=12, # width of the image | at 100 DPI, this corresponds to
yinches=8, # height of the image | the dimensions 1200x800
myFontSize="large",
myFontColor="white",
putMinerva=False): # place the MPG minerva logo in the top right corner
# import necessary modules
import numpy as np
from matplotlib import pyplot
import matplotlib as mpl
from subprocess import call
from math import pow
# *** set font properties for annotations ***
fprops=mpl.font_manager.FontProperties()
fprops.set_size(myFontSize)
#fprops.set_weight("bold")
# *** set up the matplotlib colormap based on a Splotch colormap ***
#$ cat OldSplotch.pal
#OldSplotch
#0100
#3
# 0 0 255
#128 255 128
#255 0 0
# See <http://matplotlib.sourceforge.net/api/colors_api.html>
# to understand what's going on ...
# <OldSplotch.pal> corresponds to:
OldSplotch = {'red': ((0.0, 0.0, 0.0), (0.5, 0.5, 0.5), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0), (0.5, 0.5, 0.5), (1.0, 0.0, 0.0))}
colormap = mpl.colors.LinearSegmentedColormap('colormap', OldSplotch)
# TODO implement a reader for Splotch palette files
# *** set up the figure ***
fig = pyplot.figure(figsize=(xinches,yinches))
# *** set up the colorbar ***
ax1 = fig.add_axes([0.90, 0.05, 0.02, 0.5])
norm = mpl.colors.LogNorm(vmin=valMin, vmax=valMax)
form = mpl.ticker.LogFormatterMathtext()
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=colormap, norm=norm,
format=form, orientation='vertical')
# manipulate the style of the ticklabels, which requires a loop
for tl in cb1.ax.get_yticklabels():
tl.set_fontsize(myFontSize)
tl.set_color(myFontColor)
cb1.set_label('Temperature [K]', fontproperties=fprops, color=myFontColor)
# *** set up the time/redshift variable ***
if (time>=0.0):
timeString="age of universe=%.3f" % (time, )
timeString=timeString+" Gyr"
pyplot.figtext(x=0.025, y=0.950, s=timeString, fontdict=None,
fontproperties=fprops, color=myFontColor)
#
if (redshift>0):
timeString="redshift=%.3f" % (redshift, )
pyplot.figtext(x=0.025, y=0.910, s=timeString, fontdict=None,
fontproperties=fprops, color=myFontColor)
    # Minerva needs an intermediate call to the ImageMagick tools
if putMinerva:
plotFile="./splotchColormapTmp.png"
else:
plotFile=outfile
# *** finally, plot the image and write it to a png file ***
pyplot.plot()
F=pyplot.gcf()
myDPI=100
F.savefig(plotFile, transparent=True, dpi=myDPI)
# *** put a logo (e.g. MPG Minerva) on top using ImageMagick convert ***
if putMinerva:
minervaFile="__INSERT_VALID_PATH__/minerva-white-96.png"
xoffset=str(int( (xinches*myDPI)*0.895 ))
yoffset=str(int( (yinches*myDPI)*0.005 ))
#print (xoffset, yoffset)
convertCommand="/usr/bin/env convert "+plotFile+" "+minervaFile+" -geometry +"+xoffset+"+"+yoffset+" -composite -format png "+outfile
call(convertCommand, shell=True)
# *** END SplotchColormap() ***
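# The TODO above asks for a reader for Splotch palette files. The sketch below
# is a hypothetical implementation inferred only from the OldSplotch.pal layout
# quoted in the comments (a name line, a flag line, an entry count, then R G B
# rows in 0-255 mapped to evenly spaced positions); it is not part of the
# original tool and is unused by splotchColormap().
def readSplotchPalette(palFile):
    import matplotlib as mpl
    with open(palFile) as f:
        lines = [ln.strip() for ln in f if ln.strip()]
    nEntries = int(lines[2])  # number of RGB rows
    rgb = [[int(v) / 255.0 for v in ln.split()] for ln in lines[3:3 + nEntries]]
    pos = [i / float(nEntries - 1) for i in range(nEntries)]
    cdict = dict((name, tuple((p, rgb[i][j], rgb[i][j])
                              for i, p in enumerate(pos)))
                 for j, name in enumerate(('red', 'green', 'blue')))
    return mpl.colors.LinearSegmentedColormap(lines[0], cdict)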
#
# *** Allow this Python module to be run as a standalone script. ***
#
if __name__ == "__main__":
import sys
import getopt
#
try:
opts, args = getopt.getopt(sys.argv[1:],
"t:r:c:d:o:", # the "-" options, below are the "--" options
["time=", "redshift=", "colormin=", "colormax=", "outfile="])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
#
myOutFile = "overlay.png"
myTime = -1.0
myRedshift = -1.0
myMinVal = 1
myMaxVal = 100
#
for o, a in opts:
# print (o,a)
if o in ("-t", "--time"):
myTime = float(a)
elif o in ("-r", "--redshift"):
myRedshift = float(a)
elif o in ("-c", "--colormin"):
myMinVal = pow(10.0, float(a))
elif o in ("-d", "--colormax"):
myMaxVal = pow(10.0, float(a))
elif o in ("-o", "--outfile"):
myOutFile = a
else:
assert False, "unhandled option"
#
splotchColormap(outfile=myOutFile,
time=myTime,
redshift=myRedshift,
valMin=myMinVal,
valMax=myMaxVal)
# EOF
| gpl-2.0 |
APMonitor/arduino | 2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py | 1 | 3283 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
import pandas as pd
# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values
#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed
# 120 second time horizon, 40 steps
m.time = tm
# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0
K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0
K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0
tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250
tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20
# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s
Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s
# Ambient temperature
Ta = m.Param(value=23.0) # degC
# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])
# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s
# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)
# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)
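# Written out, the empirical correlations above are two coupled first-order
# heater lags sharing the time constant tau12, each followed by a first-order
# sensor lag tau3 (this is only the equations above rearranged, not additional
# model structure):
#   tau12*dTH1/dt = -(TH1 - Ta) + K1*Q1 + K3*(TH2 - TH1)
#   tau12*dTH2/dt = -(TH2 - Ta) + K2*Q2 - K3*(TH2 - TH1)
#   tau3*dTC1/dt = TH1 - TC1,   tau3*dTC2/dt = TH2 - TC2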
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Objective type
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle
# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()
# Create plot
plt.figure(figsize=(10,7))
ax=plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))
# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
| apache-2.0 |
DistrictDataLabs/yellowbrick | yellowbrick/contrib/scatter.py | 1 | 11862 | # yellowbrick.contrib.scatter
# Implements a 2d scatter plot for feature analysis.
#
# Author: Nathan Danielsen
# Created: Fri Feb 26 19:40:00 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: scatter.py [a89633e] benjamin@bengfort.com $
"""
Implements a 2D scatter plot for feature analysis.
"""
##########################################################################
# Imports
##########################################################################
import itertools
import numpy as np
from yellowbrick.features.base import DataVisualizer
from yellowbrick.utils import is_dataframe, is_structured_array
from yellowbrick.utils import has_ndarray_int_columns
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.style.colors import resolve_colors
##########################################################################
# Quick Methods
##########################################################################
def scatterviz(
X,
y=None,
ax=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
viz : ScatterVisualizer
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
visualizer = ScatterVisualizer(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
markers=markers,
alpha=alpha,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the visualizer object
return visualizer
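# A minimal usage sketch for the quick method above (the iris data and feature
# names are illustrative assumptions, not part of this module; any feature
# matrix reduced to two columns works the same way):
#
#   from sklearn import datasets
#   from yellowbrick.contrib.scatter import scatterviz
#
#   iris = datasets.load_iris()
#   viz = scatterviz(iris.data[:, :2], iris.target,
#                    features=["sepal length", "sepal width"])
#   viz.show()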
##########################################################################
# Static ScatterVisualizer Visualizer
##########################################################################
class ScatterVisualizer(DataVisualizer):
"""
ScatterVisualizer is a bivariate feature data visualization algorithm that
plots using the Cartesian coordinates of each point.
Parameters
----------
ax : a matplotlib plot, default: None
The axis to plot the figure on.
x : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the x-axis
y : string, default: None
        The feature name that corresponds to a column name or index position
in the matrix that will be plotted against the y-axis
features : a list of two feature names to use, default: None
List of two features that correspond to the columns in the array.
The order of the two features correspond to X and Y axes on the
graph. More than two feature names or columns will raise an error.
If a DataFrame is passed to fit and features is None, feature names
are selected that are the columns of the DataFrame.
classes : a list of class names for the legend, default: None
If classes is None and a y value is passed to fit then the classes
are selected from the target vector.
color : optional list or tuple of colors to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
colormap : optional string or matplotlib cmap to colorize points, default: None
Use either color to colorize the points on a per class basis or
colormap to color them on a continuous scale.
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : keyword arguments passed to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
def __init__(
self,
ax=None,
x=None,
y=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs
):
"""
Initialize the base scatter with many of the options required in order
to make the visualization work.
"""
super(ScatterVisualizer, self).__init__(
ax=ax,
features=features,
classes=classes,
color=color,
colormap=colormap,
**kwargs
)
self.x = x
self.y = y
self.alpha = alpha
self.markers = itertools.cycle(
kwargs.pop("markers", (",", "+", "o", "*", "v", "h", "d"))
)
self.color = color
self.colormap = colormap
if self.x is not None and self.y is not None and self.features is not None:
raise YellowbrickValueError("Please specify x,y or features, not both.")
if self.x is not None and self.y is not None and self.features is None:
self.features = [self.x, self.y]
# Ensure with init that features doesn't have more than two features
if features is not None:
if len(features) != 2:
raise YellowbrickValueError(
"ScatterVisualizer only accepts two features."
)
def fit(self, X, y=None, **kwargs):
"""
        The fit method is the primary drawing input for the scatter
        visualization since it has both the X and y data required for the
        visualization, which the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with 2 features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
_, ncols = X.shape
# NOTE: Do not call super for this class, it conflicts with the fit.
# Setting these variables is similar to the old behavior of DataVisualizer.
# TODO: refactor to make use of the new DataVisualizer functionality
self.features_ = self.features
self.classes_ = self.classes
if ncols == 2:
X_two_cols = X
if self.features_ is None:
self.features_ = ["Feature One", "Feature Two"]
# Handle the feature names if they're None.
elif self.features_ is not None and is_dataframe(X):
X_two_cols = X[self.features_].values
# handle numpy named/ structured array
elif self.features_ is not None and is_structured_array(X):
X_selected = X[self.features_]
X_two_cols = X_selected.copy().view(
(np.float64, len(X_selected.dtype.names))
)
# handle features that are numeric columns in ndarray matrix
elif self.features_ is not None and has_ndarray_int_columns(self.features_, X):
f_one, f_two = self.features_
X_two_cols = X[:, [int(f_one), int(f_two)]]
else:
raise YellowbrickValueError(
"""
ScatterVisualizer only accepts two features, please
explicitly set these two features in the init kwargs or
pass a matrix/ dataframe in with only two columns."""
)
# Store the classes for the legend if they're None.
if self.classes_ is None:
# TODO: Is this the most efficient method?
self.classes_ = [str(label) for label in np.unique(y)]
# Draw the instances
self.draw(X_two_cols, y, **kwargs)
# Fit always returns self.
return self
def draw(self, X, y, **kwargs):
"""Called from the fit method, this method creates a scatter plot that
draws each instance as a class or target colored point, whose location
is determined by the feature data set.
"""
# Set the axes limits
self.ax.set_xlim([-1, 1])
self.ax.set_ylim([-1, 1])
# set the colors
color_values = resolve_colors(
n_colors=len(self.classes_), colormap=self.colormap, colors=self.color
)
colors = dict(zip(self.classes_, color_values))
# Create a data structure to hold the scatter plot representations
to_plot = {}
for kls in self.classes_:
to_plot[kls] = [[], []]
# Add each row of the data set to to_plot for plotting
# TODO: make this an independent function for override
for i, row in enumerate(X):
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
x_, y_ = row_[0], row_[1]
kls = self.classes_[y[i]]
to_plot[kls][0].append(x_)
to_plot[kls][1].append(y_)
# Add the scatter plots from the to_plot function
# TODO: store these plots to add more instances to later
# TODO: make this a separate function
for i, kls in enumerate(self.classes_):
self.ax.scatter(
to_plot[kls][0],
to_plot[kls][1],
marker=next(self.markers),
color=colors[kls],
label=str(kls),
alpha=self.alpha,
**kwargs
)
self.ax.axis("equal")
def finalize(self, **kwargs):
"""
Adds a title and a legend and ensures that the axis labels are set as
the feature names being visualized.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Divide out the two features
feature_one, feature_two = self.features_
# Set the title
self.set_title(
"Scatter Plot: {0} vs {1}".format(str(feature_one), str(feature_two))
)
# Add the legend
self.ax.legend(loc="best")
self.ax.set_xlabel(str(feature_one))
self.ax.set_ylabel(str(feature_two))
# Alias for ScatterViz
ScatterViz = ScatterVisualizer
| apache-2.0 |
santiago-salas-v/walas | node_images.py | 1 | 1746 | import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
line1 = matplotlib.lines.Line2D(
[0.5,0.5], [0,0.45],
linestyle='--', color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
line1,text1,text2
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)
fig.show()
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle(
[0.5,0.5],0.05
)
patch2 = matplotlib.patches.Rectangle(
[0.3,0.3],0.4, 0.4, alpha=0.5,
fill=False, edgecolor='black',
linestyle = '--'
)
arrow1 = matplotlib.patches.Arrow(
0, 0.5,0.45,0, width=0.05,
color='black'
)
arrow2 = matplotlib.patches.Arrow(
0.55, 0.5,0.45,0, width=0.05,
color='black'
)
arrow3 = matplotlib.patches.Arrow(
0.5, 0.0, 0,0.45, width=0.05,
color='black'
)
text1 = matplotlib.text.Text(
0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$'
)
text2 = matplotlib.text.Text(
0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$'
)
text3 = matplotlib.text.Text(
0.55, 0.1, '$n_{Ar}$\n$V_r$'
)
for artist in [
patch1,patch2,arrow1,arrow2,
arrow3,text1,text2,text3
]:
ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)
| mit |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/dataframe/io/tests/test_parquet.py | 2 | 45993 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import dask
import dask.multiprocessing
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq
from dask.dataframe.io.parquet import _parse_pandas_metadata
from dask.utils import natural_sort_key
try:
import fastparquet
except ImportError:
fastparquet = False
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
try:
import pyarrow as pa
check_pa_divs = pa.__version__ >= LooseVersion('0.9.0')
except ImportError:
check_pa_divs = False
def should_check_divs(engine):
if engine == 'fastparquet':
return True
elif engine == 'pyarrow' and check_pa_divs:
return True
return False
nrows = 40
npartitions = 15
df = pd.DataFrame({'x': [i * 7 % 5 for i in range(nrows)], # Not sorted
'y': [i * 2.5 for i in range(nrows)] # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name='myindex'))
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(params=[pytest.mark.skipif(not fastparquet, 'fastparquet',
reason='fastparquet not found'),
pytest.mark.skipif(not pq, 'pyarrow',
reason='pyarrow not found')])
def engine(request):
return request.param
def check_fastparquet():
if not fastparquet:
pytest.skip('fastparquet not found')
def check_pyarrow():
if not pq:
pytest.skip('pyarrow not found')
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {'pyarrow', 'fastparquet'}
marks = {(w, r): [] for w in backends for r in backends}
# Skip if uninstalled
for name, exists in [('fastparquet', fastparquet), ('pyarrow', pq)]:
val = pytest.mark.skip(reason='%s not found' % name)
if not exists:
for k in marks:
if name in k:
marks[k].append(val)
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split('_', 1)
key = tuple(rest.split('_'))
if (kind not in ('xfail', 'skip') or len(key) > 2 or
set(key).difference(backends)):
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(('write_engine', 'read_engine'),
[pytest.param(*k, marks=tuple(v))
for (k, v) in sorted(marks.items())])
pyarrow_fastparquet_msg = "fastparquet fails reading pyarrow written directories"
write_read_engines_xfail = write_read_engines(xfail_pyarrow_fastparquet=pyarrow_fastparquet_msg)
@write_read_engines_xfail
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice(['hello', 'yo', 'people'], size=1000).astype("O")})
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert '_common_metadata' in files
assert 'part.0.parquet' in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler='sync').reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@write_read_engines_xfail
def test_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# Infer divisions for engines/versions that support it
ddf2 = dd.read_parquet(fn, engine=read_engine, infer_divisions=should_check_divs(read_engine))
assert_eq(ddf, ddf2, check_divisions=should_check_divs(read_engine))
# infer_divisions False
ddf2_no_divs = dd.read_parquet(fn, engine=read_engine, infer_divisions=False)
assert_eq(ddf.clear_divisions(), ddf2_no_divs, check_divisions=True)
# infer_divisions unspecified
ddf2_default = dd.read_parquet(fn, engine=read_engine)
if read_engine == 'fastparquet':
# The fastparquet engine infers divisions by default because it only supports reading datasets that have a
# global _metadata file
assert_eq(ddf, ddf2_default, check_divisions=True)
else:
# pyarrow does not infer divisions by default because doing so requires reading metadata from each file in
# the dataset, which could be expensive
assert_eq(ddf.clear_divisions(), ddf2_default, check_divisions=True)
@pytest.mark.parametrize('index', [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({'a': ['a', 'b', 'b'], 'b': [4, 5, 6]})[:0]
if index:
df.set_index('a', inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
if os.path.exists(os.path.join(fn, '_metadata')):
os.unlink(os.path.join(fn, '_metadata'))
files = os.listdir(fn)
assert '_metadata' not in files
# Infer divisions for engines/versions that support it
ddf2 = dd.read_parquet(os.path.join(fn, '*'), engine=read_engine,
infer_divisions=should_check_divs(write_engine) and should_check_divs(read_engine))
assert_eq(ddf, ddf2, check_divisions=should_check_divs(write_engine) and should_check_divs(read_engine))
# No divisions
ddf2_no_divs = dd.read_parquet(os.path.join(fn, '*'), engine=read_engine, infer_divisions=False)
assert_eq(ddf.clear_divisions(), ddf2_no_divs, check_divisions=True)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted([os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith('_metadata')],
key=natural_sort_key)
# Infer divisions for engines/versions that support it
ddf2 = dd.read_parquet(files, engine=read_engine,
infer_divisions=should_check_divs(write_engine) and should_check_divs(read_engine))
assert_eq(ddf, ddf2, check_divisions=should_check_divs(write_engine) and should_check_divs(read_engine))
# No divisions
ddf2_no_divs = dd.read_parquet(files, engine=read_engine, infer_divisions=False)
assert_eq(ddf.clear_divisions(), ddf2_no_divs, check_divisions=True)
@write_read_engines_xfail
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
    # ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine, infer_divisions=should_check_divs(read_engine)),
ddf[[]], check_divisions=should_check_divs(read_engine))
# No divisions
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine, infer_divisions=False),
ddf[[]].clear_divisions(), check_divisions=True)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=['x'], engine=read_engine, infer_divisions=should_check_divs(read_engine)),
ddf[['x']], check_divisions=should_check_divs(read_engine))
# No divisions
assert_eq(dd.read_parquet(fn, columns=['x'], engine=read_engine, infer_divisions=False),
ddf[['x']].clear_divisions(), check_divisions=True)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, index='myindex', columns=['x'], engine=read_engine,
infer_divisions=should_check_divs(read_engine)),
ddf[['x']], check_divisions=should_check_divs(read_engine))
# No divisions
assert_eq(dd.read_parquet(fn, index='myindex', columns=['x'], engine=read_engine,
infer_divisions=False),
ddf[['x']].clear_divisions(), check_divisions=True)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, index='myindex', columns=['x', 'y'], engine=read_engine,
infer_divisions=should_check_divs(read_engine)),
ddf, check_divisions=should_check_divs(read_engine))
# No divisions
assert_eq(dd.read_parquet(fn, index='myindex', columns=['x', 'y'], engine=read_engine,
infer_divisions=False),
ddf.clear_divisions(), check_divisions=True)
@write_read_engines_xfail
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(dd.read_parquet(fn, index=False, engine=read_engine, infer_divisions=False),
ddf2, check_index=False, check_divisions=True)
# Two columns, none as index
assert_eq(dd.read_parquet(fn, index=False, columns=['x', 'y'], engine=read_engine,
infer_divisions=False),
ddf2[['x', 'y']], check_index=False, check_divisions=True)
# One column and one index, all as columns
assert_eq(dd.read_parquet(fn, index=False, columns=['myindex', 'x'], engine=read_engine,
infer_divisions=False),
ddf2[['myindex', 'x']], check_index=False, check_divisions=True)
@write_read_engines_xfail
def test_infer_divisions_not_sorted(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
if read_engine == 'pyarrow' and not check_pa_divs:
match = 'requires pyarrow >=0.9.0'
ex = NotImplementedError
else:
match = 'not known to be sorted across partitions'
ex = ValueError
with pytest.raises(ex, match=match):
dd.read_parquet(fn, index='x', engine=read_engine, infer_divisions=True)
@write_read_engines_xfail
def test_infer_divisions_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
if read_engine == 'pyarrow' and not check_pa_divs:
match = 'requires pyarrow >=0.9.0'
ex = NotImplementedError
else:
match = 'no index column was discovered'
ex = ValueError
with pytest.raises(ex, match=match):
dd.read_parquet(fn, engine=read_engine, infer_divisions=True)
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), 'test.parquet')
index = pd.MultiIndex.from_arrays([np.arange(10), np.arange(10) + 1],
names=['x0', 'x1'])
df = pd.DataFrame(np.random.randn(10, 2), columns=['a', 'b'], index=index)
df2 = df.reset_index(drop=False)
if engine == 'fastparquet':
fastparquet.write(fn, df, write_index=True)
# fastparquet doesn't support multi-index
with pytest.raises(ValueError):
ddf = dd.read_parquet(fn, engine=engine)
else:
import pyarrow as pa
pq.write_table(pa.Table.from_pandas(df), fn)
# Pyarrow supports multi-index reads
ddf = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns='a', engine=engine)
assert_eq(d, df['a'])
d = dd.read_parquet(fn, index=['a', 'b'], columns=['x0', 'x1'], engine=engine)
assert_eq(d, df2.set_index(['a', 'b'])[['x0', 'x1']])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, index=['a'], engine=engine)
assert_eq(d, df2.set_index('a')[['b']])
d = dd.read_parquet(fn, index=['x0'], engine=engine)
assert_eq(d, df2.set_index('x0')[['a', 'b']])
# Just columns
d = dd.read_parquet(fn, columns=['x0', 'a'], engine=engine)
assert_eq(d, df2.set_index('x1')[['x0', 'a']])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=['x0', 'b'], engine=engine)
assert_eq(d, df2[['x0', 'b']])
for index in ['x1', 'b']:
d = dd.read_parquet(fn, index=index, columns=['x0', 'a'], engine=engine)
assert_eq(d, df2.set_index(index)[['x0', 'a']])
# Columns and index intersect
for index in ['a', 'x0']:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=['x0', 'a'], engine=engine)
# Series output
for ind, col, sol_df in [(None, 'x0', df2.set_index('x1')),
(False, 'b', df2),
(False, 'x0', df2),
('a', 'x0', df2.set_index('a')),
('a', 'b', df2.set_index('a'))]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines_xfail
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=['x'], engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(ddf[['x']], ddf2, check_divisions=should_check_divs(engine))
ddf2 = dd.read_parquet(fn, columns='x', index='myindex', engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(ddf.x, ddf2, check_divisions=should_check_divs(engine))
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert (set(read(fn).dask) == set(read(fn).dask))
assert (set(read(fn).dask) !=
set(read(fn, columns=['x']).dask))
assert (set(read(fn, columns=('x',)).dask) ==
set(read(fn, columns=['x']).dask))
@pytest.mark.parametrize('c', [['x'], 'x', ['x', 'y'], []])
def test_optimize(tmpdir, c):
check_fastparquet()
fn = str(tmpdir)
ddf.to_parquet(fn)
ddf2 = dd.read_parquet(fn)
assert_eq(df[c], ddf2[c])
x = ddf2[c]
dsk = x.__dask_optimize__(x.dask, x.__dask_keys__())
assert len(dsk) == x.npartitions
assert all(v[4] == c for v in dsk.values())
@pytest.mark.skipif(not hasattr(pd.DataFrame, 'to_parquet'),
reason="no to_parquet method")
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join('test.parquet'))
df = pd.DataFrame({'x': [1, 2, 3]})
df.to_parquet(fn, engine=write_engine)
ddf = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf)
@write_read_engines_xfail
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({'x': ['a', 'b', 'c'] * 100}, dtype='category')
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories='x', engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ['a', 'b', 'c']
ddf2 = dd.read_parquet(tmp, categories=['x'], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ['a', 'b', 'c']
# autocat
if read_engine != 'pyarrow':
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ['a', 'b', 'c']
ddf2.loc[:1000].compute()
df.index.name = 'index' # defaults to 'index' in this case
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
check_fastparquet()
tmp = str(tmpdir)
df = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice(['hello', 'yo', 'people'],
size=1000).astype("O")})
df.index.name = 'index'
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp)
ddf2.to_parquet(tmp, append=True)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir):
"""Test that appended parquet equal to the original one."""
check_fastparquet()
tmp = str(tmpdir)
df = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice(['hello', 'yo', 'people'],
size=1000).astype("O")})
df.index.name = 'index'
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, append=True)
ddf2.to_parquet(tmp, append=True)
ddf3 = dd.read_parquet(tmp, engine='fastparquet')
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir):
check_fastparquet()
tmp = str(tmpdir)
df0 = pd.DataFrame({'lat': np.arange(0, 10), 'lon': np.arange(10, 20),
'value': np.arange(100, 110)})
df0.index.name = 'index'
df1 = pd.DataFrame({'lat': np.arange(10, 20), 'lon': np.arange(10, 20),
'value': np.arange(120, 130)})
df1.index.name = 'index'
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=['lon'])
dd.to_parquet(dd_df1, tmp, partition_on=['lon'], append=True,
ignore_divisions=True)
out = dd.read_parquet(tmp).compute()
out['lon'] = out.lon.astype('int64') # just to pass assert
# sort required since partitioning breaks index order
assert_eq(out.sort_values('value'), pd.concat([df0, df1])[out.columns],
check_index=False)
def test_partition_on_cats(tmpdir):
check_fastparquet()
tmp = str(tmpdir)
d = pd.DataFrame({'a': np.random.rand(50),
'b': np.random.choice(['x', 'y', 'z'], size=50),
'c': np.random.choice(['x', 'y', 'z'], size=50)})
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=['b'], engine='fastparquet')
df = dd.read_parquet(tmp, engine='fastparquet')
assert set(df.b.cat.categories) == {'x', 'y', 'z'}
d.to_parquet(tmp, partition_on=['b', 'c'], engine='fastparquet')
df = dd.read_parquet(tmp, engine='fastparquet')
assert set(df.b.cat.categories) == {'x', 'y', 'z'}
assert set(df.c.cat.categories) == {'x', 'y', 'z'}
df = dd.read_parquet(tmp, columns=['a', 'c'], engine='fastparquet')
assert set(df.c.cat.categories) == {'x', 'y', 'z'}
assert 'b' not in df.columns
df = dd.read_parquet(tmp, index='c', engine='fastparquet')
assert set(df.index.categories) == {'x', 'y', 'z'}
assert 'c' not in df.columns
# series
df = dd.read_parquet(tmp, columns='b', engine='fastparquet')
assert set(df.cat.categories) == {'x', 'y', 'z'}
def test_append_wo_index(tmpdir):
"""Test append with write_index=False."""
check_fastparquet()
tmp = str(tmpdir.join('tmp1.parquet'))
df = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice(['hello', 'yo', 'people'],
size=1000).astype("O")})
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True)
assert 'Appended columns' in str(excinfo.value)
tmp = str(tmpdir.join('tmp2.parquet'))
ddf1.to_parquet(tmp, write_index=False)
ddf2.to_parquet(tmp, write_index=False, append=True)
ddf3 = dd.read_parquet(tmp, index='f')
assert_eq(df.set_index('f'), ddf3)
def test_append_overlapping_divisions(tmpdir):
"""Test raising of error when divisions overlapping."""
check_fastparquet()
tmp = str(tmpdir)
df = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice(['hello', 'yo', 'people'],
size=1000).astype("O")})
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10:], chunksize=100)
ddf1.to_parquet(tmp)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, append=True)
assert 'Appended divisions' in str(excinfo.value)
ddf2.to_parquet(tmp, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir):
"""Test raising of error when non equal columns."""
check_fastparquet()
tmp = str(tmpdir)
df1 = pd.DataFrame({'i32': np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({'i64': np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({'i32': np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, append=True)
assert 'Appended columns' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, append=True)
assert 'Appended dtypes' in str(excinfo.value)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({'a': [1, 2, 3],
'b': [10, 20, 30],
'c': [100, 200, 300]},
index=pd.Index([-1, -2, -3], name='myindex'),
columns=['c', 'a', 'b'])
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == 'fastparquet':
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ['myindex', 'c', 'a', 'b']
ddf2 = dd.read_parquet(tmp, index='myindex', engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'f': np.arange(1000, dtype=np.float64)})
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp)
df2 = dd.read_parquet(tmp,
columns=['i32', 'f'],
engine=engine,
infer_divisions=should_check_divs(engine))
assert_eq(df[['i32', 'f']], df2,
check_index=False, check_divisions=should_check_divs(engine))
df3 = dd.read_parquet(tmp,
columns=['f', 'i32'],
engine=engine,
infer_divisions=should_check_divs(engine))
assert_eq(df[['f', 'i32']], df3,
check_index=False, check_divisions=should_check_divs(engine))
@pytest.mark.parametrize('df,write_kwargs,read_kwargs', [
(pd.DataFrame({'x': [3, 2, 1]}), {}, {}),
(pd.DataFrame({'x': ['c', 'a', 'b']}), {'object_encoding': 'utf8'}, {}),
(pd.DataFrame({'x': ['cc', 'a', 'bbb']}), {'object_encoding': 'utf8'}, {}),
(pd.DataFrame({'x': [b'a', b'b', b'c']}), {'object_encoding': 'bytes'}, {}),
(pd.DataFrame({'x': pd.Categorical(['a', 'b', 'a'])}),
{'object_encoding': 'utf8'}, {'categories': ['x']}),
(pd.DataFrame({'x': pd.Categorical([1, 2, 1])}), {}, {'categories': ['x']}),
(pd.DataFrame({'x': list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({'x': [3000, 2000, 1000]}).astype('M8[ns]'), {}, {}),
pytest.mark.xfail((pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), {}, {}),
reason="Parquet doesn't support nanosecond precision"),
(pd.DataFrame({'x': [3, 2, 1]}).astype('M8[us]'), {}, {}),
(pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ms]'), {}, {}),
(pd.DataFrame({'x': [3, 2, 1]}).astype('uint16'), {}, {}),
(pd.DataFrame({'x': [3, 2, 1]}).astype('float32'), {}, {}),
(pd.DataFrame({'x': [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({'x': [3, 1, 5]}, index=pd.Index([1, 2, 3], name='foo')), {}, {}),
(pd.DataFrame({'x': [1, 2, 3],
'y': [3, 2, 1]}), {}, {}),
(pd.DataFrame({'x': [1, 2, 3],
'y': [3, 2, 1]}, columns=['y', 'x']), {}, {}),
(pd.DataFrame({'0': [3, 2, 1]}), {}, {}),
(pd.DataFrame({'x': [3, 2, None]}), {}, {}),
(pd.DataFrame({'-': [3., 2., None]}), {}, {}),
(pd.DataFrame({'.': [3., 2., None]}), {}, {}),
(pd.DataFrame({' ': [3., 2., None]}), {}, {}),
])
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs):
check_fastparquet()
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = 'index'
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, **read_kwargs)
assert_eq(ddf, ddf2)
def test_categories(tmpdir):
check_fastparquet()
fn = str(tmpdir)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': list('caaab')})
ddf = dd.from_pandas(df, npartitions=2)
ddf['y'] = ddf.y.astype('category')
ddf.to_parquet(fn)
ddf2 = dd.read_parquet(fn, categories=['y'])
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {'a', 'b', 'c'}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories).compute()
assert cats_set.tolist() == ['a', 'c', 'a', 'b']
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
# attempt to load as category that which is not so encoded
ddf2 = dd.read_parquet(fn, categories=['x']).compute()
with pytest.raises(ValueError):
# attempt to load as category unknown column
ddf2 = dd.read_parquet(fn, categories=['foo'])
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = tm.makeTimeDataFrame()
df.index.name = 'foo'
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(ddf, ddf2, check_divisions=should_check_divs(engine))
def test_to_parquet_default_writes_nulls(tmpdir):
check_fastparquet()
check_pyarrow()
fn = str(tmpdir.join('test.parquet'))
df = pd.DataFrame({'c1': [1., np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@write_read_engines(
xfail_pyarrow_fastparquet=pyarrow_fastparquet_msg,
xfail_pyarrow_pyarrow=("Race condition writing using pyarrow with partition_on. "
"Fixed on master, but not on pyarrow 0.8.0")
)
def test_partition_on(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({'a': np.random.choice(['A', 'B', 'C'], size=100),
'b': np.random.random(size=100),
'c': np.random.randint(1, 5, size=100)})
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=['a'], engine=write_engine)
out = dd.read_parquet(tmpdir, engine=read_engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_filters(tmpdir):
check_fastparquet()
fn = str(tmpdir)
df = pd.DataFrame({'at': ['ab', 'aa', 'ba', 'da', 'bb']})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(fn, write_index=False)
ddf2 = dd.read_parquet(fn, index=False,
filters=[('at', '==', 'aa')]).compute()
assert_eq(ddf2, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn)
dd.read_parquet(fn).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
ddf.repartition(npartitions=2, force=True).to_parquet(fn)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[('at', '==', 'aa')])
assert len(df2) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn)
dd.read_parquet(fn, filters=[('at', '==', 'aa')]).compute()
assert len(ddf2) > 0
def test_read_from_fastparquet_parquetfile(tmpdir):
check_fastparquet()
fn = str(tmpdir)
df = pd.DataFrame({
'a': np.random.choice(['A', 'B', 'C'], size=100),
'b': np.random.random(size=100),
'c': np.random.randint(1, 5, size=100)
})
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=['a'], engine='fastparquet')
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[('a', '==', 'B')]).compute()
assert set(df.b[df.a == 'B']) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine='pyarrow')
@pytest.mark.parametrize('scheduler', ['threads', 'processes'])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1., 2., 3., 4.]})
df.index.name = 'index'
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, 'dask')
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(ddf, ddf2, check_divisions=should_check_divs(engine))
def test_timestamp96(tmpdir):
check_fastparquet()
fn = str(tmpdir)
df = pd.DataFrame({'a': ['now']}, dtype='M8[ns]')
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times='int96')
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn).compute()
assert_eq(out, df)
def test_drill_scheme(tmpdir):
check_fastparquet()
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N)
for i, c in enumerate(['a', 'b', 'c'])})
df2 = pd.DataFrame({c: np.random.random(N)
for i, c in enumerate(['a', 'b', 'c'])})
files = []
for d in ['test_data1', 'test_data2']:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, 'data1.parq'))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert 'dir0' in df.columns
out = df.compute()
assert 'dir0' in out
assert (np.unique(out.dir0) == ['test_data1', 'test_data2']).all()
def test_parquet_select_cats(tmpdir):
check_fastparquet()
fn = str(tmpdir)
df = pd.DataFrame({
'categories': pd.Series(
np.random.choice(['a', 'b', 'c', 'd', 'e', 'f'], size=100),
dtype='category'),
'ints': pd.Series(list(range(0, 100)), dtype='int'),
'floats': pd.Series(list(range(0, 100)), dtype='float')})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn)
rddf = dd.read_parquet(fn, columns=['ints'])
assert list(rddf.columns) == ['ints']
rddf = dd.read_parquet(fn)
assert list(rddf.columns) == list(df)
@write_read_engines(
xfail_pyarrow_fastparquet=pyarrow_fastparquet_msg,
xfail_fastparquet_pyarrow="fastparquet gh#251"
)
def test_columns_name(tmpdir, write_engine, read_engine):
if write_engine == 'fastparquet':
pytest.skip('Fastparquet does not write column_indexes')
if write_engine == 'pyarrow':
import pyarrow as pa
if pa.__version__ < LooseVersion('0.8.0'):
pytest.skip("pyarrow<0.8.0 did not write column_indexes")
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(['a', 'b'], name='idx'))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
tmp = str(tmpdir)
ddf.to_parquet(tmp, engine=write_engine)
result = dd.read_parquet(tmp, engine=read_engine)
assert_eq(result, df)
@pytest.mark.parametrize('compression,', ['default', None, 'gzip', 'snappy'])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if engine == 'fastparquet' and compression in ['snappy', 'default']:
pytest.importorskip('snappy')
df = pd.DataFrame({'x': ['a', 'b', 'c'] * 10,
'y': [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
if engine == 'fastparquet' and compression == 'default':
# ensure default compression for fastparquet is Snappy
import fastparquet
pf = fastparquet.ParquetFile(fn)
assert pf.row_groups[0].columns[0].meta_data.codec == 1
out = dd.read_parquet(fn, engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(out, ddf, check_index=(engine != 'fastparquet'), check_divisions=should_check_divs(engine))
@pytest.fixture(params=[
# fastparquet 0.1.3
{'columns': [{'metadata': None,
'name': 'idx',
'numpy_type': 'int64',
'pandas_type': 'int64'},
{'metadata': None,
'name': 'A',
'numpy_type': 'int64',
'pandas_type': 'int64'}],
'index_columns': ['idx'],
'pandas_version': '0.21.0'},
# pyarrow 0.7.1
{'columns': [{'metadata': None,
'name': 'A',
'numpy_type': 'int64',
'pandas_type': 'int64'},
{'metadata': None,
'name': 'idx',
'numpy_type': 'int64',
'pandas_type': 'int64'}],
'index_columns': ['idx'],
'pandas_version': '0.21.0'},
# pyarrow 0.8.0
{'column_indexes': [{'field_name': None,
'metadata': {'encoding': 'UTF-8'},
'name': None,
'numpy_type': 'object',
'pandas_type': 'unicode'}],
'columns': [{'field_name': 'A',
'metadata': None,
'name': 'A',
'numpy_type': 'int64',
'pandas_type': 'int64'},
{'field_name': '__index_level_0__',
'metadata': None,
'name': 'idx',
'numpy_type': 'int64',
'pandas_type': 'int64'}],
'index_columns': ['__index_level_0__'],
'pandas_version': '0.21.0'},
# TODO: fastparquet update
])
def pandas_metadata(request):
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = (
_parse_pandas_metadata(pandas_metadata)
)
assert index_names == ['idx']
assert column_names == ['A']
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata['index_columns'] == ['__index_level_0__']:
assert mapping == {'__index_level_0__': 'idx', 'A': 'A'}
else:
assert mapping == {'idx': 'idx', 'A': 'A'}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ['x']
e_mapping = {'__index_level_0__': None, 'x': 'x'}
e_column_index_names = [None]
md = {'columns': [{'metadata': None,
'name': 'x',
'numpy_type': 'int64',
'pandas_type': 'int64'},
{'metadata': None,
'name': '__index_level_0__',
'numpy_type': 'int64',
'pandas_type': 'int64'}],
'index_columns': ['__index_level_0__'],
'pandas_version': '0.21.0'}
index_names, column_names, mapping, column_index_names = (
_parse_pandas_metadata(md)
)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
# pyarrow 0.8.0 None for index
md = {'column_indexes': [{'field_name': None,
'metadata': {'encoding': 'UTF-8'},
'name': None,
'numpy_type': 'object',
'pandas_type': 'unicode'}],
'columns': [{'field_name': 'x',
'metadata': None,
'name': 'x',
'numpy_type': 'int64',
'pandas_type': 'int64'},
{'field_name': '__index_level_0__',
'metadata': None,
'name': None,
'numpy_type': 'int64',
'pandas_type': 'int64'}],
'index_columns': ['__index_level_0__'],
'pandas_version': '0.21.0'}
index_names, column_names, mapping, column_index_names = (
_parse_pandas_metadata(md)
)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
def test_pyarrow_raises_filters_categoricals(tmpdir):
check_pyarrow()
tmp = str(tmpdir)
data = pd.DataFrame({"A": [1, 2]})
df = dd.from_pandas(data, npartitions=2)
df.to_parquet(tmp, write_index=False, engine="pyarrow")
with pytest.raises(NotImplementedError):
dd.read_parquet(tmp, engine="pyarrow", filters=["A>1"])
def test_read_no_metadata(tmpdir, engine):
# use pyarrow.parquet to create a parquet file without
# pandas metadata
pa = pytest.importorskip("pyarrow")
import pyarrow.parquet as pq
tmp = str(tmpdir) + "table.parq"
table = pa.Table.from_arrays([pa.array([1, 2, 3]),
pa.array([3, 4, 5])],
names=['A', 'B'])
pq.write_table(table, tmp)
result = dd.read_parquet(tmp, engine=engine)
expected = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
assert_eq(result, expected)
def test_parse_pandas_metadata_duplicate_index_columns():
md = {
'column_indexes': [{
'field_name': None,
'metadata': {
'encoding': 'UTF-8'
},
'name': None,
'numpy_type': 'object',
'pandas_type': 'unicode'
}],
'columns': [{
'field_name': 'A',
'metadata': None,
'name': 'A',
'numpy_type': 'int64',
'pandas_type': 'int64'
}, {
'field_name': '__index_level_0__',
'metadata': None,
'name': 'A',
'numpy_type': 'object',
'pandas_type': 'unicode'
}],
'index_columns': ['__index_level_0__'],
'pandas_version': '0.21.0'
}
index_names, column_names, storage_name_mapping, column_index_names = (
_parse_pandas_metadata(md)
)
assert index_names == ['A']
assert column_names == ['A']
assert storage_name_mapping == {'__index_level_0__': 'A', 'A': 'A'}
assert column_index_names == [None]
def test_parse_pandas_metadata_column_with_index_name():
md = {
'column_indexes': [{
'field_name': None,
'metadata': {
'encoding': 'UTF-8'
},
'name': None,
'numpy_type': 'object',
'pandas_type': 'unicode'
}],
'columns': [{
'field_name': 'A',
'metadata': None,
'name': 'A',
'numpy_type': 'int64',
'pandas_type': 'int64'
}, {
'field_name': '__index_level_0__',
'metadata': None,
'name': 'A',
'numpy_type': 'object',
'pandas_type': 'unicode'
}],
'index_columns': ['__index_level_0__'],
'pandas_version': '0.21.0'
}
index_names, column_names, storage_name_mapping, column_index_names = (
_parse_pandas_metadata(md)
)
assert index_names == ['A']
assert column_names == ['A']
assert storage_name_mapping == {'__index_level_0__': 'A', 'A': 'A'}
assert column_index_names == [None]
def test_writing_parquet_with_kwargs(tmpdir, engine):
fn = str(tmpdir)
path1 = os.path.join(fn, 'normal')
path2 = os.path.join(fn, 'partitioned')
df = pd.DataFrame({'a': np.random.choice(['A', 'B', 'C'], size=100),
'b': np.random.random(size=100),
'c': np.random.randint(1, 5, size=100)})
ddf = dd.from_pandas(df, npartitions=3)
engine_kwargs = {
'pyarrow': {
'compression': 'snappy',
'coerce_timestamps': None,
'use_dictionary': True
},
'fastparquet': {
'compression': 'snappy',
'times': 'int64',
'fixed_text': None
}
}
ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
out = dd.read_parquet(path1, engine=engine, infer_divisions=should_check_divs(engine))
assert_eq(out, ddf, check_index=(engine != 'fastparquet'), check_divisions=should_check_divs(engine))
# Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
with dask.config.set(scheduler='sync'):
ddf.to_parquet(path2, engine=engine, partition_on=['a'],
**engine_kwargs[engine])
out = dd.read_parquet(path2, engine=engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):
fn = str(tmpdir)
with pytest.raises(TypeError):
ddf.to_parquet(fn, engine=engine, unknown_key='unknown_value')
def test_select_partitioned_column(tmpdir, engine):
if engine == 'pyarrow':
import pyarrow as pa
if pa.__version__ < LooseVersion('0.9.0'):
pytest.skip("pyarrow<0.9.0 did not support this")
fn = str(tmpdir)
size = 20
d = {'signal1': np.random.normal(0, 0.3, size=size).cumsum() + 50,
'fake_categorical1': np.random.choice(['A', 'B', 'C'], size=size),
'fake_categorical2': np.random.choice(['D', 'E', 'F'], size=size)}
df = dd.from_pandas(pd.DataFrame(d), 2)
df.to_parquet(fn, compression='snappy', write_index=False, engine=engine,
partition_on=['fake_categorical1', 'fake_categorical2'])
df_partitioned = dd.read_parquet(fn, engine=engine)
df_partitioned[df_partitioned.fake_categorical1 == 'A'].compute()
def test_arrow_partitioning(tmpdir):
# Issue #3518
pytest.importorskip('pyarrow')
path = str(tmpdir)
data = {
'p': np.repeat(np.arange(3), 2).astype(np.int8),
'b': np.repeat(-1, 6).astype(np.int16),
'c': np.repeat(-2, 6).astype(np.float32),
'd': np.repeat(-3, 6).astype(np.float64),
}
pdf = pd.DataFrame(data)
ddf = dd.from_pandas(pdf, npartitions=2)
ddf.to_parquet(path, engine='pyarrow', partition_on='p')
ddf = dd.read_parquet(path, engine='pyarrow')
ddf.astype({'b': np.float32}).compute()
| gpl-3.0 |
hbldh/skboost | skboost/stumps/decision_stump.py | 1 | 17561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`decision_stump`
=====================
.. module:: decision_stump
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2014-08-31, 01:52
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from warnings import warn
from operator import itemgetter
import concurrent.futures as cfut
import psutil
import numpy as np
from scipy.sparse import issparse
import six
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state, check_array
from numpy.lib.arraysetops import unique
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import _tree
try:
import skboost.stumps.ext.classifiers as c_classifiers
except ImportError as e:
c_classifiers = None
__all__ = ["DecisionStump", ]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
class DecisionStump(DecisionTreeClassifier):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
Not used in Stratos Decision Stump.
max_features : int, float, string or None, optional (default=None)
Not used in Stratos Decision Stump.
max_depth : integer or None, optional (default=None)
Not used in Stratos Decision Stump. Always a depth 1 tree.
min_samples_split : integer, optional (default=2)
Not used in Stratos Decision Stump.
min_samples_leaf : integer, optional (default=1)
Not used in Stratos Decision Stump.
random_state : int, RandomState instance or None, optional (default=None)
Not used in Stratos Decision Stump. Nothing random in learning.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
        Always 2 for this class.
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None,
distributed_learning=True,
calculate_probabilites=False,
method='bp'):
super(DecisionStump, self).__init__(criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
            warn("Setting compute_importances is no longer required as of "
                 "version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
self.distributed_learning = distributed_learning
self.calculate_probabilites = calculate_probabilites
self.method = method
def fit(self, X, y, sample_mask=None,
X_argsorted=None, check_input=True, sample_weight=None):
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
            # np.reshape is used here instead of y[:, np.newaxis] because it
            # preserves the data contiguity of y, which the latter does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in six.moves.range(self.n_outputs_):
classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
max_depth = 1
max_features = 10
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if self.method == 'bp':
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
elif self.method == 'bp_threaded':
self.tree_ = _fit_binary_decision_stump_breakpoint_threaded(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
else:
self.tree_ = _fit_binary_decision_stump_breakpoint(
X, y, sample_weight, X_argsorted, self.calculate_probabilites)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
if self.tree_.get('direction') > 0:
return ((X[:, self.tree_.get('best_dim')] > self.tree_.get('threshold')) * 2) - 1
else:
return ((X[:, self.tree_.get('best_dim')] <= self.tree_.get('threshold')) * 2) - 1
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = np.array(self.tree_['probabilities']).take(self.predict(X) > 0, axis=0)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in six.moves.range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def _fit_binary_decision_stump_breakpoint(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = (y.flatten() * 2) - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
        sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / X.shape[0]
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
for dim in six.moves.range(X.shape[1]):
if argsorted_X is not None:
sorted_x = X[argsorted_X[:, dim], dim]
w = sample_weight[argsorted_X[:, dim]]
sorted_y = Y[argsorted_X[:, dim]]
else:
data_order = np.argsort(X[:, dim])
sorted_x = X[data_order, dim]
w = sample_weight[data_order]
sorted_y = Y[data_order]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
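        # Cumulative positive/negative weight along the sorted feature. From
        # these, the weighted error of every candidate threshold (one per
        # breakpoint, i.e. per change in feature value) follows in O(n):
        #   left_errors[i]  - error of the stump predicting +1 above the i-th
        #                     breakpoint (positive weight accumulated up to it
        #                     plus negative weight beyond it);
        #   right_errors[i] - error of the opposite polarity.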
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
        # Pick the polarity whose best split yields the lower weighted error.
        if left_errors[best_left_point] < right_errors[best_right_point]:
output = [dim,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] + 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
classifier_result.append(output)
del sorted_x, sorted_y, left_errors, right_errors, w, w_pos_c, w_neg_c
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(
X[:, results['best_dim']], Y, results)
return results
def _fit_binary_decision_stump_breakpoint_threaded(X, y, sample_weight,
argsorted_X=None,
calculate_probabilities=False):
Y = y.flatten() * 2 - 1
results = {
'min_value': None,
'best_dim': 0,
'threshold': 0,
'direction': 0,
'probabilities': []
}
if sample_weight is None:
        sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / X.shape[0]
else:
sample_weight /= np.sum(sample_weight)
classifier_result = []
tpe = cfut.ThreadPoolExecutor(max_workers=psutil.cpu_count())
futures = []
if argsorted_X is not None:
for dim in six.moves.range(X.shape[1]):
futures.append(
tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight, argsorted_X[:, dim]))
else:
for dim in six.moves.range(X.shape[1]):
futures.append(tpe.submit(_breakpoint_learn_one_dimension, dim, X[:, dim], Y, sample_weight))
for future in cfut.as_completed(futures):
classifier_result.append(future.result())
# Sort the returned data after lowest error.
classifier_result = sorted(classifier_result, key=itemgetter(1))
best_result = classifier_result[0]
results['best_dim'] = int(best_result[0])
results['min_value'] = float(best_result[1])
# If the data is in integers, then set the threshold in integer as well.
if X.dtype.kind in ('u', 'i'):
results['threshold'] = int(best_result[2])
else:
results['threshold'] = float(best_result[2])
# Direction is defined as 1 if the positives labels are at
# higher values and -1 otherwise.
results['direction'] = int(best_result[3])
if calculate_probabilities:
results['probabilities'] = _calculate_probabilities(X[:, results['best_dim']], Y, results)
return results
def _calculate_probabilities(X, Y, results):
if results['direction'] > 0:
labels = X > results['threshold']
else:
labels = X <= results['threshold']
    n_correct_negs = sum(Y[~labels] < 0)
    n_false_negs = sum(Y[~labels] > 0)
n_false_pos = sum(Y[labels] < 0)
n_correct_pos = sum(Y[labels] > 0)
return [[n_correct_negs / len(Y), n_false_negs / len(Y)],
[n_false_pos / len(Y), n_correct_pos / len(Y)]]
def _breakpoint_learn_one_dimension(dim_nbr, x, y, sample_weights, sorting_argument=None):
if sorting_argument is None:
sorting_argument = np.argsort(x)
sorted_x = x[sorting_argument]
w = sample_weights[sorting_argument]
sorted_y = y[sorting_argument]
breakpoint_indices = np.where(np.diff(sorted_x))[0] + 1
w_pos_c = (w * (sorted_y > 0)).cumsum()
w_neg_c = (w * (sorted_y < 0)).cumsum()
left_errors = w_pos_c[breakpoint_indices] - w_neg_c[breakpoint_indices] + w_neg_c[-1]
right_errors = w_neg_c[breakpoint_indices] - w_pos_c[breakpoint_indices] + w_pos_c[-1]
best_left_point = np.argmin(left_errors)
best_right_point = np.argmin(right_errors)
    # Pick the polarity whose best split yields the lower weighted error.
    if left_errors[best_left_point] < right_errors[best_right_point]:
output = [dim_nbr,
left_errors[best_left_point],
(sorted_x[breakpoint_indices[best_left_point] - 1] +
sorted_x[breakpoint_indices[best_left_point]]) / 2,
1]
else:
output = [dim_nbr,
right_errors[best_right_point],
(sorted_x[breakpoint_indices[best_right_point] + 1] +
sorted_x[breakpoint_indices[best_right_point]]) / 2,
-1]
return output
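if __name__ == "__main__":
    # Illustrative usage sketch only (not part of the original module): fit a
    # DecisionStump on a tiny, made-up 1-D problem and inspect the result.
    # The feature values, labels and weights below are invented for the demo.
    _X = np.array([[0.10], [0.35], [0.40], [0.80]])
    _y = np.array([0, 1, 0, 1])
    _w = np.ones(_X.shape[0]) / _X.shape[0]
    _stump = DecisionStump(calculate_probabilites=True)
    _stump.fit(_X, _y, sample_weight=_w)
    print("threshold:", _stump.tree_['threshold'],
          "direction:", _stump.tree_['direction'])
    print("predictions (+1/-1):", _stump.predict(_X))
    print("class probabilities (2x2 table):", _stump.tree_['probabilities'])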
| mit |
eric-haibin-lin/mxnet | python/mxnet/ndarray/numpy/_op.py | 2 | 252233 | # pylint: disable=C0302
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Namespace for numpy operators used in Gluon dispatched by F=ndarray."""
import numpy as _np
from ...base import numeric_types, integer_types
from ...util import _sanity_check_params, set_module
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...context import current_context
from . import _internal as _npi
from ..ndarray import NDArray
__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',
'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',
'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',
'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',
'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',
'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',
'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
'where', 'bincount', 'pad']
@set_module('mxnet.ndarray.numpy')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
"""
return a.shape
@set_module('mxnet.ndarray.numpy')
def zeros(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. Default is `numpy.float32`. Note that this
behavior is different from NumPy's `zeros` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
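    Examples
    --------
    A minimal illustration; the printed values assume the default float32
    dtype described above.
    >>> np.zeros((2, 2))
    array([[0., 0.],
           [0., 0.]])
    >>> np.zeros((2,), dtype=int)
    array([0, 0], dtype=int64)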
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
dtype = _np.float32 if dtype is None else dtype
return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. Default is `numpy.float32`. Note that this
behavior is different from NumPy's `ones` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and ctx.
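    Examples
    --------
    A minimal illustration; the printed values assume the default float32
    dtype described above.
    >>> np.ones((2, 3))
    array([[1., 1., 1.],
           [1., 1., 1.]])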
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
dtype = _np.float32 if dtype is None else dtype
return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx: to specify the device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.zeros_like(x)
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros_like(x, int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.zeros_like(y)
array([0., 0., 0.], dtype=float64)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx: to specify the device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of ones with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.ones_like(x)
array([[1., 1., 1.],
[1., 1., 1.]])
>>> np.ones_like(x, int)
array([[1, 1, 1],
[1, 1, 1]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.ones_like(y)
array([1., 1., 1.], dtype=float64)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def broadcast_to(array, shape):
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
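    Examples
    --------
    A small sketch of the expected behaviour (float32 output shown, since
    ``np.array`` defaults to float32 in this module):
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1., 2., 3.],
           [1., 2., 3.],
           [1., 2., 3.]])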
"""
if _np.isscalar(array):
return full(shape, array)
return _npi.broadcast_to(array, shape)
@set_module('mxnet.ndarray.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or ndarray
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx: to specify the device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
If `fill_value` is an ndarray, out will have the same context as `fill_value`
regardless of the provided `ctx`.
Notes
-----
This function differs from the original `numpy.full
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html>`_ in
the following way(s):
- Have an additional `ctx` argument to specify the device
- Have an additional `out` argument
- Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if isinstance(fill_value, NDArray):
if dtype is None:
ret = broadcast_to(fill_value, shape)
else:
ret = broadcast_to(fill_value, shape).astype(dtype)
return ret
dtype = _np.float32 if dtype is None else dtype
return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx: to specify the device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1], dtype=int64)
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0], dtype=int64)
>>> np.full_like(x, 0.1, dtype=np.float64)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan], dtype=float64)
>>> y = np.arange(6, dtype=np.float32)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : ndarray
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : {False}, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only support False at this moment)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(Not supported at this moment)
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.empty_like(a)
array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
[ 4567052944, -5764607523034234880, 844424930131968]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
[2.0e-323, 2.5e-323, 3.0e-323]])
"""
dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',
_np.int64:'int64', _np.float16:'float16', _np.float32:'float32',
_np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}
if order != 'C':
raise NotImplementedError("Only support C-order at this moment")
if subok:
raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
if shape is not None:
raise NotImplementedError("Assigning new shape is not supported at this moment")
try:
dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]
except:
raise NotImplementedError("Do not support this dtype at this moment")
return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
@set_module('mxnet.ndarray.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array. The default is `float32`.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
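    Examples
    --------
    A short illustration; outputs are float32 because that is the stated
    default dtype of this function.
    >>> np.arange(5)
    array([0., 1., 2., 3., 4.])
    >>> np.arange(2, 10, 2)
    array([2., 4., 6., 8.])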
"""
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
if stop is None:
stop = start
start = 0
if step is None:
step = 1
if start is None and stop is None:
raise ValueError('start and stop cannot be both None')
if step == 0:
raise ZeroDivisionError('step cannot be 0')
return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``numpy.float32``.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
    >>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if not isinstance(n, int):
raise TypeError("Input 'n' should be an integer")
if n < 0:
raise ValueError("Input 'n' cannot be negative")
if ctx is None:
ctx = current_context()
dtype = _np.float32 if dtype is None else dtype
return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : ndarray
The source array.
indices : ndarray
The indices of the values to extract. Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave.
* 'clip' -- clip to the range (default)
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray
The returned array has the same type as `a`.
Notes
-----
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
- Only ndarray or scalar ndarray is accepted as valid input.
Examples
--------
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> indices = np.array([0, 1, 4])
>>> np.take(a, indices)
array([4., 3., 6.])
    In this example, since `a` is an ndarray, "fancy" indexing can be used.
>>> a[indices]
array([4., 3., 6.])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, np.array([[0, 1], [2, 3]]))
array([[4., 3.],
[5., 7.]])
"""
if mode not in ('wrap', 'clip', 'raise'):
raise NotImplementedError(
"function take does not support mode '{}'".format(mode))
if axis is None:
return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
else:
return _npi.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : ndarray
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (only support int32 and int64 element).
values : ndarray
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
Notes
-----
- Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
- If obj is a ndarray, it's dtype only supports int64
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1., 1.],
[2., 2.],
[3., 3.]])
>>> np.insert(a, 1, np.array(5))
array([1., 5., 1., 2., 2., 3., 3.])
>>> np.insert(a, 1, np.array(5), axis=1)
array([[1., 5., 1.],
[2., 5., 2.],
[3., 5., 3.]])
Difference between sequence and scalars:
>>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> b = a.flatten()
>>> b
array([1., 1., 2., 2., 3., 3.])
>>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
array([1., 1., 5., 6., 2., 2., 3., 3.])
>>> np.insert(b, slice(2, 4), np.array([5, 6]))
array([1., 1., 5., 2., 6., 2., 3., 3.])
# type casting
>>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
>>> x = np.arange(8).reshape(2, 4)
>>> idx = np.array([1, 3], dtype=np.int64)
>>> np.insert(x, idx, np.array([999]), axis=1)
array([[ 0., 999., 1., 2., 999., 3.],
[ 4., 999., 5., 6., 999., 7.]])
"""
if isinstance(values, numeric_types):
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
return _npi.insert_tensor(arr, obj, val=values, axis=axis)
if not isinstance(arr, NDArray):
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if not isinstance(values, NDArray):
raise TypeError("'values' can not support type {}".format(str(type(values))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
return _npi.insert_tensor(arr, values, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : ndarray or numeric value
Left-hand side operand.
rhs : ndarray or numeric value
        Right-hand side operand.
fn_array : function
Function to be called if both lhs and rhs are of ``ndarray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``ndarray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``ndarray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
mxnet.numpy.ndarray or scalar
result array or scalar
"""
from ...numpy import ndarray
from ..ndarray import from_numpy # pylint: disable=unused-import
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs, out=out)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs), out=out)
else:
return rfn_scalar(rhs, float(lhs), out=out)
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs), out=out)
elif isinstance(lhs, ndarray) and isinstance(rhs, ndarray):
return fn_array(lhs, rhs, out=out)
else:
raise TypeError('type {} not supported'.format(str(type(rhs))))
#pylint: enable= too-many-arguments, no-member, protected-access
@set_module('mxnet.ndarray.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : ndarray
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
This function differs from the original `numpy.unique
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
the following aspects:
- Only support ndarray as input.
- Object arrays or structured arrays are not supported.
Examples
--------
>>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
array([1., 2., 3.])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1., 2., 3.])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1., 0., 0.],
[2., 3., 4.]])
Return the indices of the original array that give the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 5, 3, 2], dtype=int64)
>>> a[indices]
array([1., 2., 3., 4., 6.])
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 4, 3, 1, 2, 1], dtype=int64)
>>> u[indices]
array([1., 2., 6., 4., 2., 3., 2.])
"""
ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis)
if isinstance(ret, list):
return tuple(ret)
else:
return ret
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
add : ndarray or scalar
The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
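    Examples
    --------
    A small sketch of the expected behaviour (float32 inputs and outputs):
    >>> np.add(np.array([1, 2]), np.array([3, 4]))
    array([4., 6.])
    >>> np.add(np.array([1, 2]), 3)
    array([4., 5.])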
"""
return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
"""
Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
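    Examples
    --------
    A small sketch of the expected behaviour (float32 inputs and outputs):
    >>> np.subtract(np.array([4, 9]), np.array([1, 5]))
    array([3., 4.])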
"""
return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,
_npi.rsubtract_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2
are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
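    Examples
    --------
    A small sketch of the expected behaviour (float32 inputs and outputs):
    >>> np.multiply(np.array([1, 2]), np.array([3, 4]))
    array([3., 8.])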
"""
return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
"""
Returns a true division of the inputs, element-wise.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 type.
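    Examples
    --------
    A small sketch of the expected behaviour (float32 inputs and outputs):
    >>> np.divide(np.array([1, 4]), np.array([2, 8]))
    array([0.5, 0.5])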
"""
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
def true_divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 type.
"""
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
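    Examples
    --------
    A small sketch of the expected behaviour (float32 inputs and outputs):
    >>> np.mod(np.array([5, 7]), np.array([2, 3]))
    array([1., 1.])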
"""
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : ndarray
Input array.
obj : slice, int or ndarray of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, 1, 0)
array([[ 1., 2., 3., 4.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, slice(None, None, 2), 1)
array([[ 2., 4.],
[ 6., 8.],
[10., 12.]])
>>> np.delete(arr, np.array([1,3,5]), None)
array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])
>>> np.delete(arr, np.array([1,1,5]), None)
array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])
"""
if not isinstance(arr, NDArray):
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.delete(arr, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
return _npi.delete(arr, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None):
"""
Matrix product of two arrays.
Parameters
----------
a, b : ndarray
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot :
Sum products over arbitrary axes.
dot :
alternative matrix product with different broadcasting rules.
einsum :
Einstein summation convention.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional matrices.
- If either argument is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
- Multiplication by scalars is not allowed, use multiply instead.
- Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature (n,k),(k,m)->(n,m):
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4., 1.],
[2., 2.]])
For 2-D mixed with 1-D, the result is the usual.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1., 2.])
>>> np.matmul(b, a)
array([1., 2.])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a, b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
array(98.)
>>> sum(a[0, 1, :] * b[0, :, 1])
array(98.)
Scalar multiplication raises an error.
>>> np.matmul([1, 2], 3)
Traceback (most recent call last):
...
mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
"""
return _npi.matmul(a, b, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
This is a scalar if both `x1` and `x2` are scalars.
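Examples
--------
A minimal usage sketch (printed values assume the default float32 dtype):
>>> np.remainder(np.arange(7.), 5.)
array([0., 1., 2., 3., 4., 0., 1.])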
"""
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
"""
First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
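Examples
--------
An illustrative sketch (array values assume the default float32 dtype):
>>> np.power(2., 3.)
8.0
>>> np.power(np.array([1., 2., 3.]), 2)
array([1., 4., 9.])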
"""
return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)
@set_module('mxnet.ndarray.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : ndarray
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
"""
if order is not None:
raise NotImplementedError("order not supported here")
return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.ndarray.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : ndarray
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
"""
if order is not None:
raise NotImplementedError("order not supported here")
return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.ndarray.numpy')
def tensordot(a, b, axes=2):
r"""
tensordot(a, b, axes=2)
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements ndarray must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
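When `axes` is an integer ``N``, the last ``N`` axes of `a` are contracted
with the first ``N`` axes of `b`; a small sketch with ``axes=1`` (an
ordinary matrix product):
>>> a = np.arange(6.).reshape(2, 3)
>>> b = np.arange(6.).reshape(3, 2)
>>> np.tensordot(a, b, axes=1).shape
(2, 2)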
"""
if _np.isscalar(axes):
return _npi.tensordot_int_axes(a, b, axes)
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes_summed, b_axes_summed = axes
if _np.isscalar(a_axes_summed):
a_axes_summed = (a_axes_summed,)
if _np.isscalar(b_axes_summed):
b_axes_summed = (b_axes_summed,)
if len(a_axes_summed) != len(b_axes_summed):
raise ValueError('Axes length mismatch')
return _npi.tensordot(a, b, a_axes_summed, b_axes_summed)
@set_module('mxnet.ndarray.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : ndarray
Input data. The histogram is computed over the flattened array.
bins : int or NDArray
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
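Examples
--------
A minimal sketch (``range`` must be given when `bins` is an integer; the
call is expected to return the bin counts together with the bin edges):
>>> data = np.array([0.5, 1.5, 1.7, 3.2])
>>> counts, edges = np.histogram(data, bins=4, range=(0, 4))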
"""
if normed is True:
raise NotImplementedError("normed is not supported yet...")
if weights is not None:
raise NotImplementedError("weights is not supported yet...")
if density is True:
raise NotImplementedError("density is not supported yet...")
if isinstance(bins, numeric_types):
if range is None:
raise NotImplementedError("automatic range is not supported yet...")
return _npi.histogram(a, bin_cnt=bins, range=range)
if isinstance(bins, (list, tuple)):
raise NotImplementedError("array_like bins is not supported yet...")
if isinstance(bins, str):
raise NotImplementedError("string bins is not supported yet...")
if isinstance(bins, NDArray):
return _npi.histogram(a, bins=bins)
raise ValueError("np.histogram fails with", locals())
@set_module('mxnet.ndarray.numpy')
def eye(N, M=None, k=0, dtype=_np.float32, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
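Examples
--------
Illustrative sketch (the default dtype is float32):
>>> np.eye(2)
array([[1., 0.],
[0., 1.]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])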
"""
_sanity_check_params('eye', ['order'], kwargs)
ctx = kwargs.pop('ctx', current_context())
if ctx is None:
ctx = current_context()
return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.ndarray.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
Notes
-----
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
- `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
- axis could only be 0
- There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
if isinstance(start, (list, _np.ndarray, NDArray)) or \
isinstance(stop, (list, _np.ndarray, NDArray)):
raise NotImplementedError('start and stop only support int')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
if retstep:
step = (stop - start) / (num - 1) if endpoint else (stop - start) / num  # spacing differs when the endpoint is excluded
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step
else:
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Non-scalar `start` and `stop` are not supported.
Parameters
----------
start : int or float
``base ** start`` is the starting value of the sequence.
stop : int or float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Now, axis only support axis = 0.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the following code. Currently only ``axis = 0`` is supported.
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \
isinstance(stop, (list, tuple, _np.ndarray, NDArray)):
raise NotImplementedError('start and stop only support int and float')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
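Examples
--------
A small sketch showing where the new axis appears:
>>> x = np.array([1., 2.])
>>> np.expand_dims(x, axis=0).shape
(1, 2)
>>> np.expand_dims(x, axis=1).shape
(2, 1)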
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm(np.arange(6, dtype=int), 20)
array([ 0, 20, 20, 60, 20, 20], dtype=int64)
"""
return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : ndarray, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
>>> np.tril(a, -1)
array([[ 0., 0., 0.],
[ 4., 0., 0.],
[ 7., 8., 0.],
[10., 11., 12.]])
"""
return _npi.tril(m, k)
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
"""Helper function for unary operators.
Parameters
----------
x : ndarray or scalar
Input of the unary operator.
fn_array : function
Function to be called if x is of ``ndarray`` type.
fn_scalar : function
Function to be called if x is a Python scalar.
out : ndarray
The buffer ndarray for storing the result of the unary function.
Returns
-------
out : mxnet.numpy.ndarray or scalar
Result array or scalar.
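Examples
--------
A minimal sketch of the dispatch behaviour (assuming ``np`` refers to
``mxnet.numpy``): Python scalars take the `fn_scalar` (official NumPy) path,
while ndarrays are routed to the MXNet operator `fn_array`.
>>> _unary_func_helper(0.0, _npi.sin, _np.sin)
0.0
>>> _unary_func_helper(np.zeros((3,)), _npi.sin, _np.sin)
array([0., 0., 0.])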
"""
if isinstance(x, numeric_types):
return fn_scalar(x, **kwargs)
elif isinstance(x, NDArray):
return fn_array(x, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sin(np.pi/2.)
1.0
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])
"""
return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.cos(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sinh(0)
0.0
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.sinh(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cosh(0)
1.0
"""
return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
- input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <class 'complex'> not supported
Examples
--------
>>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.log10(np.array([1e-15, -3.]))
array([-15., nan])
"""
return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sqrt(np.array([1,4,9]))
array([1., 2., 3.])
>>> np.sqrt(np.array([4, -1, np.inf]))
array([ 2., nan, inf])
"""
return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
r"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt(np.array([1., 8., 27.]))
array([1., 2., 3.])
"""
return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
"""
return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
r"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
Note
-------
- Only supports real number as input elements.
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
>>> # Use scalars as inputs:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
>>> # Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
- Only support ndarray or scalar now.
- `where` argument is not supported.
- Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
r"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
See also
----------
cos, arctan, arcsin
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
>>> np.arccos(np.array([1, -1]))
array([0.       , 3.1415927])
"""
return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and
``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
>>> # Using default float32 dtype may lead to slightly different behavior:
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
>>> np.log(1)
0.0
"""
return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
Notes
-------
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> # Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray or scalar
Angles in radians.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
Notes
-----
"rad2deg(x)" is "x *180 / pi".
This function differs from the original numpy.arange in the following aspects:
- Only support float32 and float64.
- `out` must be in the same size of input.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
"""
return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original `numpy.log2
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Angles in degrees.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
Notes
-----
"deg2rad(x)" is "x * pi / 180".
This function differs from the original `numpy.deg2rad` in the following aspects:
- Only support float32 and float64.
- `out` must be in the same size of input.
Examples
--------
>>> np.deg2rad(180)
3.1415927
"""
return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
"""
return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
Notes
-----
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
- Complex input is not supported.
"""
return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
"""
return _unary_func_helper(x, _npi.negative, _np.negative, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
r"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : ndarray
An array of floats to be rounded
out : ndarray, optional
Output array
Returns
-------
y : ndarray of floats
Examples
---------
>>> np.fix(3.14)
3.0
"""
return _unary_func_helper(x, _npi.fix, _np.fix, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : ndarray
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
where : ndarray, optional
Values of True indicate to calculate the ufunc at that position,
values of False indicate to leave the value in the output alone.
Returns
-------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
Examples
--------
>>> np.tan(0.5)
0.5463024898437905
"""
return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> #if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> #if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.trunc in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.logical_not in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
"""
return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. DType of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arcsinh(a)
    array([1.8798633, 2.3124383])
    >>> np.arcsinh(0)
    0.0
"""
return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
>>> np.arctanh(0.0)
0.0
"""
return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
    >>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
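    Examples
    --------
    A short usage sketch (outputs assume the default float32 dtype):
    >>> x = np.arange(9.0)
    >>> np.split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
    >>> np.split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([8.])]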
"""
axis_size = ary.shape[axis]
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
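        # An integer means N equal sections: require an exact division and
        # compute the start offset of each section explicitly.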
if axis_size % sections:
raise ValueError('array split does not result in an equal division')
section_size = int(axis_size / sections)
indices = [i * section_size for i in range(sections)]
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
ret = _npi.split(ary, indices, axis, False)
assert isinstance(ret, list), 'Output of split should be list,' \
' got a return type {}'.format(type(ret))
return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
    If `indices_or_sections` is an integer, N, the array will be divided
    into N sub-arrays along `axis`. Unlike `split`, an equal division is
    not required: for an array of length l that should be split into n
    sections, it returns l % n sub-arrays of size l//n + 1 and the rest
    of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Param used to determine the number and size of the subarray.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Examples
--------
>>> x = np.arange(9.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.array_split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([8.])]
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
indices = []
sections = 0
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
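    # Unlike split(), the integer `sections` is forwarded to the backend operator,
    # which handles the unequal-division case described above.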
ret = _npi.split(ary, indices, axis, False, sections)
if not isinstance(ret, list):
return [ret]
return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and with ``axis=1`` otherwise.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
    If an index exceeds the dimension of the array along `axis`,
    an error is raised, so each index must be less than or equal to
    the dimension of the array along that axis.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
    Notes
    ------
    - If `indices_or_sections` is given as an integer but a split does not
      result in an equal division, a ValueError is raised.
    - If `indices_or_sections` is the integer 1, an error is raised, because
      a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
    If ``ary`` has one dimension, the split is along axis 0.
    >>> x = np.arange(4)
    >>> x
    array([0., 1., 2., 3.])
    >>> np.hsplit(x, 2)
    [array([0., 1.]), array([2., 3.])]
    A repeated index produces an empty sub-array:
    >>> np.hsplit(x, [2, 2])
    [array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
if len(ary.shape) < 1:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
indices = []
sections = 0
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
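    # Delegate to the backend hsplit operator with either explicit indices or a section count.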
ret = _npi.hsplit(ary, indices, 1, False, sections)
if not isinstance(ret, list):
return [ret]
return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def vsplit(ary, indices_or_sections):
r"""
vsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Notes
-------
    This function differs from the original `numpy.vsplit
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
- Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
- In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
if len(ary.shape) < 2:
raise ValueError("vsplit only works on arrays of 2 or more dimensions")
return split(ary, indices_or_sections, 0)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
- ary[:, :, :2]
- ary[:, :, 2:3]
- ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
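    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.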
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
"""
if len(ary.shape) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def concatenate(seq, axis=0, out=None):
"""
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of ndarray
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
"""
return _npi.concatenate(*seq, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : ndarray
Values are appended to a copy of this array.
values : ndarray
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.ndarray.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of ndarray
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
        The stacked array has one more dimension than the input arrays.
    """
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.stack(*arrays, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
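    Parameters
    ----------
    tup : sequence of 1-D or 2-D ndarrays
        Arrays to stack. All of them must have the same first dimension.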
Returns
--------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.column_stack(*tup)
@set_module('mxnet.ndarray.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.hstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _npi.dstack(*arrays)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
    """
return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
    """
return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
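    Examples
    --------
    A minimal sketch (the default float32 dtype is assumed):
    >>> x = np.array([[1, 2, 3]])
    >>> np.swapaxes(x, 0, 1)
    array([[1.],
           [2.],
           [3.]])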
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.ndarray.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
ndarray `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
"""
if a_min is None and a_max is None:
raise ValueError('array_clip: must set either max or min')
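    # An open-ended bound is represented as +/-inf for the backend operator.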
if a_min is None:
a_min = float('-inf')
if a_max is None:
a_max = float('inf')
return _npi.clip(a, a_min, a_max, out=out)
@set_module('mxnet.ndarray.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def argmin(a, axis=None, out=None):
r"""
    Returns the indices of the minimum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.
    This function differs from the original `numpy.argmin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmin(a)
array(0.)
>>> np.argmin(a, axis=0)
array([0., 0., 0.])
>>> np.argmin(a, axis=1)
array([0., 0.])
>>> b = np.arange(6)
>>> b[2] = 0
>>> b
array([0., 1., 0., 3., 4., 5.])
    >>> np.argmin(b) # Only the first occurrence is returned.
array(0.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmin(a, axis=1, out=b)
array([0., 0.])
>>> b
array([0., 0.])
"""
return _npi.argmin(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : ndarray
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : ndarray, optional
An array of weights associated with the values in a, must be the same dtype with a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : ndarray, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : ndarray
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a.
Raises
--------
MXNetError
- When all weights along axis sum to zero.
- When the length of 1D weights is not the same as the shape of a along axis.
- When given 1D weights, the axis is not specified or is not int.
- When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
Notes
--------
    This function differs from the original `numpy.average
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
- Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
- Does not support complex dtype
- The dtypes of a and weights must be the same
- Integral a results in float32 returned dtype, not float64
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
    >>> weights = np.array([0.25, 0.75])
    >>> weights
    array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
"""
if weights is None:
return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
else:
return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
@set_module('mxnet.ndarray.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
mean(a, axis=None, dtype=None, out=None, keepdims=None)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default is float32;
for floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only ndarray is accepted as valid input, python iterables or scalar is not supported
- default data type for integer input is float32
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
"""
return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : ndarray
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float32`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
        The desired data-type for the array. Default is `int32`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int32)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
if isinstance(dimensions, (tuple, list)):
if ctx is None:
ctx = current_context()
return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
else:
raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
        or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-------
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
- ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)
@set_module('mxnet.ndarray.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only support row-major, C-style order.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
Notes
-----
    This function differs from the original numpy.ravel in the following aspects:
- Only support row-major, C-style order.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
if order == 'F':
raise NotImplementedError('order {} is not supported'.format(order))
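    # Plain Python scalars fall back to NumPy; ndarrays dispatch to the backend reshape.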
if isinstance(x, numeric_types):
return _np.reshape(x, -1)
elif isinstance(x, NDArray):
return _npi.reshape(x, -1)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters:
-------------
indices : array_like
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
Returns:
-------------
unraveled_coords : ndarray
Each row in the ndarray has the same shape as the indices array.
Each column in the ndarray represents the unravelled index
Examples:
-------------
>>> np.unravel_index([22, 41, 37], (7,6))
    (array([3., 6., 6.]), array([4., 5., 1.]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
if order == 'C':
if isinstance(indices, numeric_types):
return _np.unravel_index(indices, shape)
ret = _npi.unravel_index_fallback(indices, shape=shape)
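        # Repack the backend's list of coordinate arrays as a tuple to mirror numpy.unravel_index.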
ret_list = []
for item in ret:
ret_list += [item]
return tuple(ret_list)
else:
raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters:
-------------
arr : ndarray
        Input array for accessing the main diagonal. All dimensions
should have equal length.
Return:
-------------
diag: tuple of ndarray
indices of the main diagonal.
Examples:
-------------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return tuple(_npi.diag_indices_from(arr))
@set_module('mxnet.ndarray.numpy')
def hanning(M, dtype=_np.float32, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
        An optional value type. Default is `float32`. Note that you need
        to select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hanning(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def hamming(M, dtype=_np.float32, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
        An optional value type. Default is `float32`. Note that you need
        to select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hamming(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def blackman(M, dtype=_np.float32, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
        An optional value type. Default is `float32`. Note that you need
        to select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : ndarray or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : ndarray or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : ndarray or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
"""
from ...numpy import ndarray
if isinstance(m, numeric_types):
return _np.flip(m, axis)
elif isinstance(m, ndarray):
return _npi.flip(m, axis, out=out)
else:
raise TypeError('type {} not supported'.format(str(type(m))))
@set_module('mxnet.ndarray.numpy')
def flipud(m):
r"""
flipud(*args, **kwargs)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag(np.array([1.0, 2, 3]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
array(True)
>>> np.flipud(np.array([1,2]))
array([2., 1.])
"""
return flip(m, 0)
@set_module('mxnet.ndarray.numpy')
def fliplr(m):
r"""
fliplr(*args, **kwargs)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag(np.array([1.,2.,3.]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
array(True)
"""
return flip(m, 1)
@set_module('mxnet.ndarray.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : ndarray or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : ndarray or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
This function differs from the original numpy.around in the following aspects:
- Cannot cast type automatically. Dtype of `out` must be the same as the expected one.
- Does not support complex-valued input.
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1, 2, 3, 11], decimals=-1)
array([ 0, 0, 0, 10])
"""
from ...numpy import ndarray
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, ndarray):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
from ...numpy import ndarray
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, ndarray):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : ndarray or scalar
`y`-coordinates.
x2 : ndarray or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
This function differs from the original numpy.arctan2 in the following aspects:
- Only supports float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> x = np.array([1, -1])
>>> y = np.array([0, 0])
>>> np.arctan2(x, y)
array([ 1.5707964, -1.5707964])
"""
return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
_npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : ndarray
Leg of the triangle(s).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
This function differs from the original numpy.hypot in the following aspects:
- Only supports float16, float32 and float64.
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
array([12, 1], dtype=int32)
>>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
array([0, 1], dtype=int32)
>>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
array([ 2, 4, 16], dtype=int32)
>>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_xor(13, 17)
28
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)
array([26, 6])
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([26, 5])
>>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_or(13, 17)
29
>>> np.bitwise_or(31, 5)
31
>>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)
array([31, 7])
>>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([31, 7])
>>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, True])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : ndarray or scalar
Array of multipliers.
x2 : ndarray or scalar, int
Array of twos exponents.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
Unlike NumPy, `x2` may be a float as well as an int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.])
"""
return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
@set_module('mxnet.ndarray.numpy')
def inner(a, b):
r"""
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : ndarray
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
"""
return tensordot(a, b, [-1, -1])
@set_module('mxnet.ndarray.numpy')
def outer(a, b):
r"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) ndarray
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) ndarray
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
"""
return tensordot(a.flatten(), b.flatten(), 0)
@set_module('mxnet.ndarray.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : ndarray
First argument to the dot product.
b : ndarray
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.ndarray.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, not_equal, greater_equal, less, less_equal
Examples
--------
>>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,
_npi.less_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, not_equal, greater, greater_equal, less_equal
Examples
--------
>>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
@set_module('mxnet.ndarray.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, not_equal, greater, less, less_equal
Examples
--------
>>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
_npi.less_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, not_equal, greater, greater_equal, less
Examples
--------
>>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
_npi.greater_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : ndarray
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _npi.rot90(m, k=k, axes=axes)
@set_module('mxnet.ndarray.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of ndarray
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
- Does not support 'optimal' strategy
- Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
- Does not produce view in any cases
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
array(60.)
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0., 6., 12., 18., 24.])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10., 35., 60., 85., 110.])
>>> np.sum(a, axis=1)
array([ 10., 35., 60., 85., 110.])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10., 35., 60., 85., 110.])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.einsum('ij->ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.transpose(c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
Vector inner products:
>>> np.einsum('i,i', b, b)
array(30.)
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.dot(a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.einsum('...j,j', a, b)
array([ 30., 80., 130., 180., 230.])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.einsum(',ij', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.multiply(3, c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0., 1., 2., 3., 4.],
[0., 2., 4., 6., 8.]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('k...,jk', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path. Performance
improvements can be particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Greedy `einsum` (faster optimal path approximation): ~0.117ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
"""
# Grab non-einsum kwargs; do not optimize by default.
optimize_arg = kwargs.pop('optimize', False)
out = kwargs.pop('out', None)
subscripts = operands[0]
operands = operands[1:]
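# The backend operator takes the subscripts string as a keyword argument and the optimize flag as an int.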
return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))
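# Illustrative sketch (not part of the module): implicit and explicit subscripts can
# describe the same operation. The shapes below are made up for illustration and
# assume `np` is mxnet.numpy.
#
#   a = np.arange(6).reshape(2, 3)
#   b = np.arange(12).reshape(3, 4)
#   np.einsum('ij,jk', a, b)       # implicit mode: matrix product
#   np.einsum('ij,jk->ik', a, b)   # explicit mode: same result, output labels spelled out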
@set_module('mxnet.ndarray.numpy')
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
Parameters
----------
a : ndarray
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
ndarray.nonzero :
Equivalent ndarray method.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]], dtype=int32)
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.stack(np.nonzero(x)))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9], dtype=int32)
>>> a[a > 3]
array([4, 5, 6, 7, 8, 9], dtype=int32)
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
"""
out = _npi.nonzero(a).transpose()
return tuple([out[i] for i in range(len(out))])
@set_module('mxnet.ndarray.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : ndarray
Input array
q : ndarray
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : scalar or ndarray
Output array.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, np.array(50))
array(3.5)
>>> np.percentile(a, np.array(50), axis=0)
array([6.5, 4.5, 2.5])
>>> np.percentile(a, np.array(50), axis=1)
array([7., 2.])
>>> np.percentile(a, np.array(50), axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.percentile(a, np.array(50), axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, np.array(50), axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
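# A scalar q takes the q_scalar path; an ndarray q is passed to the backend as a second input.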
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q, out=out)
return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
@set_module('mxnet.ndarray.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th quantile of the data along the specified axis.
New in version 1.15.0.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
q : ndarray
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j, whichever is nearest.
midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : ndarray
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
Notes
-----
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
- q must be ndarray type even if it is a scalar
- do not support overwrite_input
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10., 7., 4.],
[3., 2., 1.]])
>>> q = np.array(0.5)
>>> q
array(0.5)
>>> np.quantile(a, q)
array(3.5)
>>> np.quantile(a, q, axis=0)
array([6.5, 4.5, 2.5])
>>> np.quantile(a, q, axis=1)
array([7., 2.])
>>> np.quantile(a, q, axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.quantile(a, q, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, q, axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> out
array([6.5, 4.5, 2.5])
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
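# Quantiles in [0, 1] are rescaled to percentiles (q * 100) so the percentile backend can be reused.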
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q * 100, out=out)
return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
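# Illustrative sketch (assuming `np` is mxnet.numpy): for a scalar-like q in [0, 1],
# np.quantile(a, np.array(q)) gives the same result as np.percentile(a, np.array(q * 100)).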
@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- It is actually the same as `may_share_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- It is actually the same as `shares_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : ndarray
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : ndarray, optional
Not supported yet
Returns
-------
diff : ndarray
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _npi.diff(a, n=n, axis=axis)
@set_module('mxnet.ndarray.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : ndarray
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _npi.resize_fallback(a, new_shape=new_shape)
@set_module('mxnet.ndarray.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : ndarray
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,
1.2800000e+02])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
1.2800000e+02])
>>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y)
array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],
[ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
"""
if isinstance(x, numeric_types):
return _np.nan_to_num(x, copy, nan, posinf, neginf)
elif isinstance(x, NDArray):
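# Integer dtypes cannot hold NaN or infinity, so such inputs are returned unchanged.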
if x.dtype in ['int8', 'uint8', 'int32', 'int64']:
return x
if not copy:
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This function differs from the original `numpy.isnan
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
array([ True, False, False])
"""
return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
This function differs from the original `numpy.isinf
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isinf(x, y)
array([ True, False, True])
>>> y
array([ True, False, True])
"""
return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf(np.nan)
False
>>> np.isposinf(np.array([-np.inf, 0., np.inf]))
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isposinf(x, y)
array([False, False, True])
>>> y
array([False, False, True])
"""
return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf(float('-inf'))
True
>>> np.isneginf(np.array([-np.inf, 0., np.inf]))
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isneginf(x, y)
array([ True, False, False])
>>> y
array([ True, False, False])
"""
return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is finite (not infinity and not Not a Number), false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(-np.inf)
False
>>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isfinite(x, y)
array([False, True, False])
>>> y
array([False, True, False])
"""
return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def where(condition, x=None, y=None): # pylint: disable=too-many-return-statements
"""where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. The rest of this documentation
covers only the case where all three arguments are provided.
Parameters
----------
condition : ndarray
Where True, yield `x`, otherwise yield `y`.
x, y : ndarray
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
This function differs from the original `numpy.where
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in
the following way(s):
- If `condition` is a scalar, this operator returns x or y directly without broadcasting.
- If `condition` is ndarray, while both `x` and `y` are scalars,
the output dtype will be `float32`.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.where(a < 5, a, 10*a)
array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])
This can be used on multidimensional arrays too:
>>> cond = np.array([[True, False], [True, True]])
>>> x = np.array([[1, 2], [3, 4]])
>>> y = np.array([[9, 8], [7, 6]])
>>> np.where(cond, x, y)
array([[1., 8.],
[3., 4.]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = onp.ogrid[:3, :4]
>>> x = np.array(x)
>>> y = np.array(y)
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]], dtype=int64)
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0., 1., 2.],
[ 0., 2., -1.],
[ 0., 3., -1.]])
"""
if x is None and y is None:
return nonzero(condition)
else:
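# A scalar condition selects x or y directly, without broadcasting (see Notes above).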
if isinstance(condition, numeric_types):
if condition != 0:
return x
else:
return y
else:
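# ndarray condition: pick the backend op based on whether x and y are scalars or ndarrays.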
if isinstance(x, numeric_types) and isinstance(y, numeric_types):
return _npi.where_scalar2(condition, float(x), float(y), out=None)
elif isinstance(x, NDArray) and isinstance(y, NDArray):
return _npi.where(condition, x, y, out=None)
elif isinstance(y, NDArray):
return _npi.where_lscalar(condition, y, float(x), out=None)
elif isinstance(x, NDArray):
return _npi.where_rscalar(condition, x, float(y), out=None)
else:
raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.ndarray.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : ndarray
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : ndarray
An array of numbers, at which to evaluate p.
Returns
-------
values : ndarray
Result array of polynomials
Notes
-----
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
    - Does not support poly1d.
    - `x` should be an ndarray even if it contains only one element.
Examples
--------
    >>> p = np.array([3, 0, 1])
    >>> p
    array([3., 0., 1.])
    >>> x = np.array([5])
    >>> x
    array([5.])
    >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1
    array([76.])
    >>> x = np.array([5, 4])
    >>> x
    array([5., 4.])
    >>> np.polyval(p, x)
    array([76., 49.])
"""
from ...numpy import ndarray
if isinstance(p, ndarray) and isinstance(x, ndarray):
return _npi.polyval(p, x)
elif not isinstance(p, ndarray) and not isinstance(x, ndarray):
return _np.polyval(p, x)
else:
raise TypeError('type not supported')
@set_module('mxnet.ndarray.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : ndarray
        Input array of non-negative ints, 1-dimensional.
    weights : ndarray, optional
        Weights, an array of the same shape as `x`.
    minlength : int, optional
        A minimum number of bins for the output array.
    Returns
    -------
    out : ndarray
        The result of binning the input array. The length of `out` is equal to ``amax(x)+1``.
    Raises
    ------
    ValueError
        If the input is not 1-dimensional, or contains elements with negative values,
        or if `minlength` is negative.
    TypeError
        If the type of the input is float or complex.
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
    if not isinstance(x, NDArray):
        raise TypeError("Input data should be an NDArray")
    if minlength < 0:
        raise ValueError("minlength must be non-negative")
if weights is None:
return _npi.bincount(x, minlength=minlength, has_weights=False)
return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
@set_module('mxnet.ndarray.numpy')
def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
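    Examples
    --------
    Illustrative sketch only (mirroring ``numpy.pad`` semantics); the accepted
    ``pad_width`` formats and the output dtype/formatting may differ slightly
    by backend.
    >>> a = np.array([1, 2, 3, 4, 5])
    >>> np.pad(a, (1, 2), 'constant', constant_values=0)
    array([0., 1., 2., 3., 4., 5., 0., 0.])
    >>> np.pad(a, (2, 2), 'edge')
    array([1., 1., 1., 2., 3., 4., 5., 5., 5.])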
"""
# pylint: disable = too-many-return-statements, inconsistent-return-statements
if not _np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
if not isinstance(pad_width, tuple):
raise TypeError("`pad_width` must be tuple.")
if mode == "linear_ramp":
raise ValueError("mode {'linear_ramp'} is not supported.")
if mode == "wrap":
raise ValueError("mode {'wrap'} is not supported.")
if mode == "median":
raise ValueError("mode {'median'} is not supported.")
if mode == "mean":
raise ValueError("mode {'mean'} is not supported.")
if mode == "empty":
raise ValueError("mode {'empty'} is not supported.")
if callable(mode):
raise ValueError("mode {'<function>'} is not supported.")
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
if isinstance(mode, _np.compat.basestring):
        # Make sure we only have kwargs that are allowed for the given mode
        for key in kwargs:
            if key not in allowedkwargs[mode]:
                raise ValueError('%s keyword not in allowed keywords %s' % (key, allowedkwargs[mode]))
if mode == "constant":
values = kwargs.get("constant_values", 0)
if isinstance(values, tuple):
raise TypeError("unsupported constant_values type: {'tuple'}.")
        return _npi.pad(x, pad_width, mode='constant', constant_value=values)
elif mode == "symmetric":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even")
elif mode == "edge":
return _npi.pad(x, pad_width, mode='edge')
elif mode == "reflect":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='reflect', reflect_type="even")
elif mode == "maximum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='maximum')
elif mode == "minimum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='minimum')
return _npi.pad(x, pad_width, mode='constant', constant_value=0)
| apache-2.0 |
arahuja/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
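
    Examples
    --------
    Illustrative only: with a 50% inlier ratio, ``min_samples=2`` and a
    confidence (probability) of 0.99, at least 17 trials are needed.

    >>> _dynamic_max_trials(50, 100, 2, 0.99)
    17.0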
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
ptitjano/bokeh | examples/compat/mpl_contour.py | 7 | 1028 | # demo inspired by: http://matplotlib.org/examples/pylab_examples/contour_demo.html
from bokeh import mpl
from bokeh.plotting import output_file, show
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel controls whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label.
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
output_file("mpl_contour.html", title="mpl_contour.py example")
show(mpl.to_bokeh())
| bsd-3-clause |
bd-j/magellanic | magellanic/sfhs/prediction_scripts/predicted_total.py | 1 | 5894 | import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
    # you won't be able to predict the integrated spectrum or magnitudes;
    # filterlist must be set to None in calls to total_cloud_data
sps = None
wlengths = {'2': '{4.5\mu m}',
'4': '{8\mu m}'}
dmod = {'smc':18.9,
'lmc':18.5}
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
lfstring=None, agb_dust=1.0,
one_metal=None):
#########
# SPS
#########
#
if filternames is not None:
sps = fsps.StellarPopulation(add_agb_dust_model=True)
sps.params['sfh'] = 0
sps.params['agb_dust'] = agb_dust
dust = ['nodust', 'agbdust']
sps.params['imf_type'] = 0.0 #salpeter
filterlist = observate.load_filters(filternames)
else:
filterlist = None
##########
# SFHs
##########
regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
if basti:
        zlist = zlist_basti
if 'header' in regions.keys():
rheader = regions.pop('header') #dump the header info from the reg. dict
total_sfhs = None
for n, dat in regions.iteritems():
total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
total_zmet = dat['zmet']
#collapse SFHs to one metallicity
if one_metal is not None:
ts = None
for sfh in total_sfhs:
ts = sum_sfhs(ts, sfh)
total_sfh = ts
zlist = [zlist[one_metal]]
total_zmet = [total_zmet[one_metal]]
#############
# LFs
############
bins = rsed.lfbins
if lfstring is not None:
# these are stored as a list of different metallicities
lffiles = [lfstring.format(z) for z in zlist]
lf_base = [read_villaume_lfs(f) for f in lffiles]
#get LFs broken out by age and metallicity as well as the total
lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
else:
lfs_zt, lf, logages = None, None, None
###########
# SED
############
if filterlist is not None:
spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
maggies = 10**(-0.4 * np.atleast_1d(mags))
else:
maggies, mass = None, None
#############
# Write output
############
total_values = {}
total_values['agb_clf'] = lf
total_values['agb_clfs_zt'] = lfs_zt
total_values['clf_mags'] = bins
total_values['logages'] = logages
total_values['sed_ab_maggies'] = maggies
total_values['sed_filters'] = filternames
total_values['lffile'] = lfstring
total_values['mstar'] = mass
total_values['zlist'] = zlist
return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
"""
Accumulate individual sets of SFHs into a total set of SFHs. This
assumes that the individual SFH sets all have the same number and
order of metallicities, and the same time binning.
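
    Illustrative sketch only (real SFH dicts carry more keys, e.g. 't1'); the
    values below are made up:

        >>> import numpy as np
        >>> a = [{'sfr': np.array([1., 2.])}]
        >>> b = [{'sfr': np.array([3., 4.])}]
        >>> sum_sfhs(a, b)[0]['sfr']   # -> array([4., 6.])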
"""
if sfhs1 is None:
return copy.deepcopy(sfhs2)
elif sfhs2 is None:
return copy.deepcopy(sfhs1)
else:
out = copy.deepcopy(sfhs1)
for s1, s2 in zip(out, sfhs2):
s1['sfr'] += s2['sfr']
return out
if __name__ == '__main__':
filters = ['galex_NUV', 'spitzer_irac_ch2',
'spitzer_irac_ch4', 'spitzer_mips_24']
#filters = None
ldir, cdir = 'lf_data/', 'composite_lfs/'
outst = '{0}_n2teffcut.p'
# total_cloud_data will loop over the appropriate (for the
# isochrone) metallicities for a given lfst filename template
lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
basti = False
agb_dust=1.0
agebins = np.arange(9)*0.3 + 7.4
#loop over clouds (and bands and agb_dust) to produce clfs
for cloud in ['smc']:
rdir = '{0}cclf_{1}_'.format(cdir, cloud)
for band in ['2','4']:
lfstring = lfst.format(ldir, agb_dust, band)
dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
lfstring=lfstring, basti=basti)
agebins = sfhs[0]['t1'][3:-1]
outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
#fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
#fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
#fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
#pl.close(fig)
colheads = (len(agebins)-1) * ' N<m(t={})'
colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
in zip(dat['agb_clfs_zt'], dat['logages'])])
write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
outfile.replace(cdir,'byage_clfs/'), lfstring,
colheads=colheads)
pl.figure()
for s, z in zip(sfhs, dat['zlist']):
pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
pl.legend(loc=0)
pl.title(cloud.upper())
print(cloud, dat['mstar'])
| gpl-2.0 |
sunshinelover/chanlun | vn.trader/ctaAlgo/uiChanlunWidget.py | 1 | 68647 | # encoding: UTF-8
"""
缠论模块相关的GUI控制组件
"""
from vtGateway import VtSubscribeReq
from uiBasicWidget import QtGui, QtCore, BasicCell,BasicMonitor,TradingWidget
from eventEngine import *
from ctaBase import *
import pyqtgraph as pg
import numpy as np
import pymongo
from pymongo.errors import *
from datetime import datetime, timedelta
from ctaHistoryData import HistoryDataEngine
import time
import types
import pandas as pd
########################################################################
class MyStringAxis(pg.AxisItem):
def __init__(self, xdict, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
self.x_values = np.asarray(xdict.keys())
self.x_strings = xdict.values()
def tickStrings(self, values, scale, spacing):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
# if we have vs in our values, show the string
# otherwise show nothing
if vs in self.x_values:
# Find the string with x_values closest to vs
vstr = self.x_strings[np.abs(self.x_values - vs).argmin()]
else:
vstr = ""
strings.append(vstr)
return strings
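# Usage sketch (mirrors how PriceWidget.initplotKline builds its time axis
# further below); the sample timestamps are made up:
#
#     xdict = dict(enumerate(['09:30', '09:31', '09:32']))
#     axis = MyStringAxis(xdict, orientation='bottom')
#     pw = pg.PlotWidget(axisItems={'bottom': axis})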
########################################################################
class ChanlunEngineManager(QtGui.QWidget):
"""chanlun引擎管理组件"""
signal = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, chanlunEngine, eventEngine, mainEngine, parent=None):
"""Constructor"""
super(ChanlunEngineManager, self).__init__(parent)
self.chanlunEngine = chanlunEngine
self.eventEngine = eventEngine
self.mainEngine = mainEngine
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
self.instrumentid = ''
self.initUi()
self.registerEvent()
        # log startup
self.chanlunEngine.writeChanlunLog(u'缠论引擎启动成功')
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'缠论策略')
        # futures symbol input box
self.codeEdit = QtGui.QLineEdit()
self.codeEdit.setPlaceholderText(u'在此输入期货代码')
self.codeEdit.setMaximumWidth(200)
        self.data = pd.DataFrame()  # data used for plotting (important)
        self.fenX = []  # x coordinates used for pens and segments
        self.fenY = []  # y coordinates used for pens and segments
        self.zhongshuPos = []  # positions of the central pivots (zhongshu)
        self.zhongShuType = []  # direction of each central pivot
# 金融图
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.TickW = None
# MongoDB数据库相关
self.__mongoConnected = False
self.__mongoConnection = None
# 调用函数
self.__connectMongo()
# 按钮
penButton = QtGui.QPushButton(u'分笔')
segmentButton = QtGui.QPushButton(u'分段')
zhongshuButton = QtGui.QPushButton(u'走势中枢')
shopButton = QtGui.QPushButton(u'买卖点')
restoreButton = QtGui.QPushButton(u'还原')
penButton.clicked.connect(self.pen)
segmentButton.clicked.connect(self.segment)
zhongshuButton.clicked.connect(self.zhongShu)
shopButton.clicked.connect(self.shop)
restoreButton.clicked.connect(self.restore)
# Chanlun组件的日志监控
self.chanlunLogMonitor = QtGui.QTextEdit()
self.chanlunLogMonitor.setReadOnly(True)
self.chanlunLogMonitor.setMaximumHeight(180)
# 设置布局
self.hbox2 = QtGui.QHBoxLayout()
self.hbox2.addWidget(self.codeEdit)
self.hbox2.addWidget(penButton)
self.hbox2.addWidget(segmentButton)
self.hbox2.addWidget(zhongshuButton)
self.hbox2.addWidget(shopButton)
self.hbox2.addWidget(restoreButton)
self.hbox2.addStretch()
tickButton = QtGui.QPushButton(u'Tick')
oneMButton = QtGui.QPushButton(u"1分")
fiveMButton = QtGui.QPushButton(u'5分')
fifteenMButton = QtGui.QPushButton(u'15分')
thirtyMButton = QtGui.QPushButton(u'30分')
sixtyMButton = QtGui.QPushButton(u'60分')
dayButton = QtGui.QPushButton(u'日')
weekButton = QtGui.QPushButton(u'周')
monthButton = QtGui.QPushButton(u'月')
oneMButton.checked = True
self.vbox1 = QtGui.QVBoxLayout()
tickButton.clicked.connect(self.openTick)
oneMButton.clicked.connect(self.oneM)
fiveMButton.clicked.connect(self.fiveM)
fifteenMButton.clicked.connect(self.fifteenM)
thirtyMButton.clicked.connect(self.thirtyM)
sixtyMButton.clicked.connect(self.sixtyM)
dayButton.clicked.connect(self.daily)
weekButton.clicked.connect(self.weekly)
monthButton.clicked.connect(self.monthly)
self.vbox2 = QtGui.QVBoxLayout()
self.vbox1.addWidget(self.PriceW)
self.vbox2.addWidget(tickButton)
self.vbox2.addWidget(oneMButton)
self.vbox2.addWidget(fiveMButton)
self.vbox2.addWidget(fifteenMButton)
self.vbox2.addWidget(thirtyMButton)
self.vbox2.addWidget(sixtyMButton)
self.vbox2.addWidget(dayButton)
self.vbox2.addWidget(weekButton)
self.vbox2.addWidget(monthButton)
self.vbox2.addStretch()
self.hbox3 = QtGui.QHBoxLayout()
self.hbox3.addStretch()
self.hbox3.addLayout(self.vbox1)
self.hbox3.addLayout(self.vbox2)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addWidget(self.chanlunLogMonitor)
self.setLayout(self.vbox)
self.codeEdit.returnPressed.connect(self.updateSymbol)
#-----------------------------------------------------------------------
#从通联数据端获取历史数据
def downloadData(self, symbol, unit):
        listBar = []  # K-line (candlestick) data
num = 0
        # fetch K-line data from the DataYes client
        historyDataEngine = HistoryDataEngine()
        # if unit is an int, fetch minute data; if it is a string, fetch daily/weekly/monthly bars
        if type(unit) is types.IntType:
            # download today's minute bars from DataYes and store them in the database
            historyDataEngine.downloadFuturesIntradayBar(symbol, unit)
            # fetch the previous days' minute bars from the database
cx = self.getDbData(symbol, unit)
if cx:
for data in cx:
barOpen = data['open']
barClose = data['close']
barLow = data['low']
barHigh = data['high']
barTime = data['datetime']
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
elif type(unit) is types.StringType:
data = historyDataEngine.downloadFuturesBar(symbol, unit)
if data:
for d in data:
barOpen = d.get('openPrice', 0)
barClose = d.get('closePrice', 0)
barLow = d.get('lowestPrice', 0)
barHigh = d.get('highestPrice', 0)
if unit == "daily":
barTime = d.get('tradeDate', '').replace('-', '')
else:
barTime = d.get('endDate', '').replace('-', '')
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
if unit == "monthly" or unit == "weekly":
listBar.reverse()
else:
print "参数格式错误"
return
        # convert the list into a DataFrame for easier processing
df = pd.DataFrame(listBar, columns=['num', 'time', 'open', 'close', 'low', 'high'])
df.index = df['time'].tolist()
df = df.drop('time', 1)
return df
#-----------------------------------------------------------------------
#从数据库获取前两天的分钟数据
def getDbData(self, symbol, unit):
#周六周日不交易,无分钟数据
# 给数据库命名
dbname = ''
days = 7
if unit == 1:
dbname = MINUTE_DB_NAME
elif unit == 5:
dbname = MINUTE5_DB_NAME
elif unit == 15:
dbname = MINUTE15_DB_NAME
elif unit == 30:
dbname = MINUTE30_DB_NAME
elif unit == 60:
dbname = MINUTE60_DB_NAME
        weekday = datetime.now().weekday()  # weekday() returns 0-6 for Monday through Sunday
if days == 2:
if weekday == 6:
aDay = timedelta(days=3)
elif weekday == 0 or weekday == 1:
aDay = timedelta(days=4)
else:
aDay = timedelta(days=2)
else:
aDay = timedelta(days=7)
startDate = (datetime.now() - aDay).strftime('%Y%m%d')
print startDate
if self.__mongoConnected:
collection = self.__mongoConnection[dbname][symbol]
cx = collection.find({'date': {'$gte': startDate}})
return cx
else:
return None
#----------------------------------------------------------------------------------
#"""合约变化"""
def updateSymbol(self):
# 读取组件数据
instrumentid = str(self.codeEdit.text())
self.chanlunEngine.writeChanlunLog(u'查询合约%s' % (instrumentid))
# 从通联数据客户端获取当日分钟数据
self.data = self.downloadData(instrumentid, 1)
if self.data.empty:
self.chanlunEngine.writeChanlunLog(u'合约%s 不存在' % (instrumentid))
else:
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (instrumentid))
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
# # 订阅合约[仿照ctaEngine.py写的]
# # 先取消订阅之前的合约,再订阅最新输入的合约
# contract = self.mainEngine.getContract(self.instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.unsubscribe(req, contract.gatewayName)
#
# contract = self.mainEngine.getContract(instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.subscribe(req, contract.gatewayName)
# else:
# self.chanlunEngine.writeChanlunLog(u'交易合约%s无法找到' % (instrumentid))
#
# # 重新注册事件监听
# self.eventEngine.unregister(EVENT_TICK + self.instrumentid, self.signal.emit)
# self.eventEngine.register(EVENT_TICK + instrumentid, self.signal.emit)
# 更新目前的合约
self.instrumentid = instrumentid
def oneM(self):
"打开1分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 1)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fiveM(self):
"打开5分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 5分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 5)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fifteenM(self):
"打开15分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 15分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 15)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def thirtyM(self):
"打开30分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 30分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 30)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def sixtyM(self):
"打开60分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 60分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 60)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def daily(self):
"""打开日K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 日K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "daily")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def weekly(self):
"""打开周K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 周K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "weekly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
def monthly(self):
"""打开月K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 月K线图' % (self.instrumentid))
# 从通联数据客户端获取数据并画图
self.data = self.downloadData(self.instrumentid, "monthly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def openTick(self):
"""切换成tick图"""
self.chanlunEngine.writeChanlunLog(u'打开tick图')
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.TickW = TickWidget(self.eventEngine, self.chanlunEngine)
self.vbox1.addWidget(self.TickW)
self.tickLoaded = True
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def restore(self):
"""还原初始k线状态"""
self.chanlunEngine.writeChanlunLog(u'还原加载成功')
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.data = self.downloadData(self.instrumentid, 1)
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data, self)
self.vbox1.addWidget(self.PriceW)
# 画K线图
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'还原为1分钟k线图')
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
# ----------------------------------------------------------------------
def pen(self):
"""加载分笔"""
# 先合并K线数据,记录新建PriceW之前合并K线的数据
if not self.penLoaded:
after_fenxing = self.judgeInclude() #判断self.data中K线数据的包含关系
# 清空画布时先remove已有的Widget再新建
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, after_fenxing)
self.vbox1.addWidget(self.PriceW)
#使用合并K线的数据重新画K线图
self.plotAfterFenXing(after_fenxing)
# 找出顶和底
fenxing_data, fenxing_type = self.findTopAndLow(after_fenxing)
arrayFenxingdata = np.array(fenxing_data)
arrayTypedata = np.array(fenxing_type)
self.fenY = []
self.fenX = [m[0] for m in arrayFenxingdata]
fenbiY1 = [m[4] for m in arrayFenxingdata] # 顶分型标志最高价
fenbiY2 = [m[3] for m in arrayFenxingdata] # 底分型标志最低价
for i in xrange(len(self.fenX)):
if arrayTypedata[i] == 1:
self.fenY.append(fenbiY1[i])
else:
self.fenY.append(fenbiY2[i])
if not self.penLoaded:
if self.fenX:
self.fenX.append(self.fenX[-1])
self.fenY.append(self.fenY[-1])
print "self.fenX: ", self.fenX
print "self.fenY: ", self.fenY
self.fenbi(self.fenX, self.fenY)
self.fenX.pop()
self.fenY.pop()
self.chanlunEngine.writeChanlunLog(u'分笔加载成功')
self.penLoaded = True
# ----------------------------------------------------------------------
def segment(self):
if not self.penLoaded:
            self.pen()  # pens must be drawn before segments
        segmentX = []  # x values of segment points
        segmentY = []  # y values of segment points
        temp_type = 0  # segment direction flag: 1 up, -1 down, 0 before the first three pens are checked for overlap
i = 0
while i < len(self.fenX) - 4:
if temp_type == 0:
if self.fenY[i] > self.fenY[i+1] and self.fenY[i] > self.fenY[i+3]:
temp_type = -1 #向下线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
elif self.fenY[i] < self.fenY[i+1] and self.fenY[i] < self.fenY[i+3]:
temp_type = 1 #向上线段,三笔重合
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
else:
temp_type = 0
i += 1
continue
if temp_type == 1: #向上线段
j = i+1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: #记录顶底
high.append(self.fenY[j])
low.append(self.fenY[j+1])
j += 2
if self.fenY[i+4] < self.fenY[i+1]: #向上线段被向下笔破坏
j = 0
while j < len(high)-2:
# 顶底出现顶分型,向上线段结束
if high[j+1] > high[j] and high[j+1] > high[j+2]:
num = i + 2 * j + 3 #线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 #向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
else: #向上线段未被向下笔破坏
j = 1
while j < len(high)-2:
# 顶底出现底分型,向上线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 # 向上线段一定由向下线段结束
break
j += 1
if j == len(high)-2:
break
elif temp_type == -1: # 向下线段
j = i + 1
high = [] # 记录顶
low = [] # 记录低
while j < len(self.fenX) - 1: # 记录顶底
high.append(self.fenY[j + 1])
low.append(self.fenY[j])
j += 2
if self.fenY[i + 4] > self.fenY[i + 1]: # 向下线段被向上笔破坏
j = 0
while j < len(high) - 2:
# 顶底出现底分型,向下线段结束
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 3 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
else: # 向下线段未被向上笔破坏
j = 1
while j < len(high) - 2:
# 顶底出现顶分型,向下线段结束
if high[j + 1] > high[j] and high[j + 1] > high[j + 2]:
num = i + 2 * j + 1 # 线段结束点位置
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # 向下线段一定由向上线段结束
break
j += 1
if j == len(high) - 2:
break
print "segmentX: ", segmentX
print "segmentY: ", segmentY
if not self.segmentLoaded:
if len(segmentX) > 1:
segmentX.append(segmentX[-1])
segmentY.append(segmentY[-1])
segmentX = [int(x) for x in segmentX]
segmentY = [int(y) for y in segmentY]
self.fenduan(segmentX, segmentY)
self.chanlunEngine.writeChanlunLog(u'分段加载成功')
self.segmentLoaded = True
# ----------------------------------------------------------------------
def updateChanlunLog(self, event):
"""更新缠论相关日志"""
log = event.dict_['data']
# print type(log)
if(log.logTime):
content = '\t'.join([log.logTime, log.logContent])
self.chanlunLogMonitor.append(content)
else:
print 0
#-----------------------------------------------------------------------
def zhongShu(self):
if not self.penLoaded:
self.pen() # 先分笔才能画走势中枢
# temp_type = 0 # 标志中枢方向,向上为1,向下为-1
i = 0
temp_high, temp_low = 0, 0
        minX, maxX = 0, 0  # maxX must start at 0 so the fallback check below can detect an unset end point
self.zhongshuPos = [] # 记录所有的中枢开始段和结束段的位置
self.zhongShuType = [] #记录所有中枢的方向
while i < len(self.fenX) - 4:
if (self.fenY[i] > self.fenY[i + 1] and self.fenY[i + 1] < self.fenY[i + 4]): #判断进入段方向
temp_low = max(self.fenY[i + 1], self.fenY[i + 3])
temp_high = min(self.fenY[i + 2], self.fenY[i + 4]) #记录中枢内顶的最小值与底的最大值
minX = self.fenX[i+1]
self.zhongshuPos.append(i)
self.zhongShuType.append(-1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high :
maxX = self.fenX[i+4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low :
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
if j == i:
break
elif (self.fenY[i] < self.fenY[i + 1] and self.fenY[i + 1] > self.fenY[i + 4]):
temp_high = min(self.fenY[i + 1], self.fenY[i + 3])
temp_low = max(self.fenY[i + 2], self.fenY[i + 4])
minX = self.fenX[i + 1]
self.zhongshuPos.append(i)
self.zhongShuType.append(1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
if i == j:
break
else:
i += 1
continue
# 画出当前判断出的中枢
if minX != 0 and maxX == 0:
maxX = self.fenX[i+4]
i = i + 1
self.zhongshuPos.append(i + 4)
else:
self.zhongshuPos.append(i + 3)
minY, maxY = temp_low, temp_high
print minX, minY, maxX, maxY
if int(maxY) > int(minY):
plotX = [minX, minX, maxX, maxX, minX]
plotY = [minY, maxY, maxY, minY, minY]
plotX = [int(x) for x in plotX]
plotY = [int(y) for y in plotY]
self.zhongshu(plotX, plotY)
i = i + 4
self.zhongShuLoaded = True
self.chanlunEngine.writeChanlunLog(u'走势中枢加载成功')
# ----------------------------------------------------------------------
def shop(self):
"""加载买卖点"""
if not self.zhongShuLoaded:
self.zhongShu()
i = 0
while i < len(self.zhongShuType) - 1:
startPos, endPos = self.zhongshuPos[2*i], self.zhongshuPos[2*i + 1] # 中枢开始段的位置和结束段的位置
startY = self.fenY[startPos + 1] - self.fenY[startPos] # 开始段Y轴距离
startX = self.fenX[startPos + 1] - self.fenX[startPos] # 开始段X轴距离
startK = abs(startY * startX) # 开始段投影面积
endY = self.fenY[endPos + 1] - self.fenY[endPos] # 结束段Y轴距离
endX = self.fenX[endPos + 1] - self.fenX[endPos] # 结束段段X轴距离
endK = abs(endY * endX) # 开始段投影面积
if endK < startK:
print startPos, endPos
if self.zhongShuType[i] == 1 and self.zhongShuType[i + 1] == -1:
# 一卖
self.sellpoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二卖,一卖后一个顶点
self.sellpoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三卖,一卖之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] > self.fenY[nextPos]:
self.sellpoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.sellpoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
elif self.zhongShuType[i] == -1 and self.zhongShuType[i + 1] == 1:
# 一买
self.buypoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# 二买,一买后一个底点
self.buypoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# 三买,一买之后中枢结束段的第一个顶
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] < self.fenY[nextPos]:
self.buypoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.buypoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
i = i + 1 # 接着判断之后的中枢是否出现背驰
self.chanlunEngine.writeChanlunLog(u'买卖点加载成功')
# ----------------------------------------------------------------------
def fenbi(self, fenbix, fenbiy):
self.PriceW.pw2.plotItem.plot(x=fenbix, y=fenbiy, pen=QtGui.QPen(QtGui.QColor(255, 236, 139)))
def fenduan(self, fenduanx, fenduany):
self.PriceW.pw2.plot(x=fenduanx, y=fenduany, symbol='o', pen=QtGui.QPen(QtGui.QColor(131, 111, 255)))
def zhongshu(self, zhongshux, zhongshuy):
self.PriceW.pw2.plot(x=zhongshux, y=zhongshuy, pen=QtGui.QPen(QtGui.QColor(255,165,0)))
def buypoint(self, buyx, buyy, point):
if point == 1:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(255,0,0), symbolPen=(255,0,0), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(238,130,238), symbolPen=(238,130,238),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(138,43,226), symbolPen=(138,43,226),symbol='star')
def sellpoint(self, sellx, selly, point):
if point == 1:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(119,172,48), symbolPen=(119,172,48), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(221,221,34), symbolPen=(221,221,34),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(179,158,77), symbolPen=(179,158,77),symbol='star')
# ----------------------------------------------------------------------
    # determine inclusion relations between K-lines and merge contained bars
def judgeInclude(self):
        ## determine inclusion relations between consecutive K-lines
        k_data = self.data
        # DataFrame holding the merged (post-inclusion) K-lines
        after_fenxing = pd.DataFrame()
        temp_data = k_data[:1]
        zoushi = [3]  # 3 - flat, 4 - trending down, 5 - trending up
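        # Illustrative example of the merge rule (values made up for clarity):
        #   previous bar: high=10, low=5    current bar: high=9, low=6
        #   the previous bar contains the current one, so the two are merged;
        #   in a down-trend the merged bar takes the lower high (9),
        #   in an up-trend it takes the higher low (6).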
for i in xrange(len(k_data)):
case1 = temp_data.high[-1] >= k_data.high[i] and temp_data.low[-1] <= k_data.low[i] # 第1根包含第2根
case2 = temp_data.high[-1] <= k_data.high[i] and temp_data.low[-1] >= k_data.low[i] # 第2根包含第1根
case3 = temp_data.high[-1] == k_data.high[i] and temp_data.low[-1] == k_data.low[i] # 第1根等于第2根
case4 = temp_data.high[-1] > k_data.high[i] and temp_data.low[-1] > k_data.low[i] # 向下趋势
case5 = temp_data.high[-1] < k_data.high[i] and temp_data.low[-1] < k_data.low[i] # 向上趋势
if case3:
zoushi.append(3)
continue
elif case1:
print temp_data
if zoushi[-1] == 4:
temp_data.ix[0, 4] = k_data.high[i] #向下走取高点的低点
else:
temp_data.ix[0, 3] = k_data.low[i] #向上走取低点的高点
elif case2:
temp_temp = temp_data[-1:]
temp_data = k_data[i:i + 1]
if zoushi[-1] == 4:
temp_data.ix[0, 4] = temp_temp.high[0]
else:
temp_data.ix[0, 3] = temp_temp.low[0]
elif case4:
zoushi.append(4)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
elif case5:
zoushi.append(5)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
return after_fenxing
# ----------------------------------------------------------------------
    # plot the merged K-line chart used for pen analysis
    def plotAfterFenXing(self, after_fenxing):
        # inclusion relations already resolved; the K-lines here are merged
        for i in xrange(len(after_fenxing)):
            # adjust each bar's high/low and open/close; merged bars are drawn without shadows
after_fenxing.iloc[i, 0] = i
if after_fenxing.open[i] > after_fenxing.close[i]:
after_fenxing.iloc[i, 1] = after_fenxing.high[i]
after_fenxing.iloc[i, 2] = after_fenxing.low[i]
else:
after_fenxing.iloc[i, 1] = after_fenxing.low[i]
after_fenxing.iloc[i, 2] = after_fenxing.high[i]
self.PriceW.onBarAfterFenXing(i, after_fenxing.index[i], after_fenxing.open[i], after_fenxing.close[i], after_fenxing.low[i], after_fenxing.high[i])
self.PriceW.plotKlineAfterFenXing()
print "plotKLine after fenxing"
# ----------------------------------------------------------------------
    # find the tops and bottoms (fractal highs and lows)
    def findTopAndLow(self, after_fenxing):
        temp_num = 0  # position of the previous top or bottom
        temp_high = 0  # high of the previous top
        temp_low = 0  # low of the previous bottom
        temp_type = 0  # type of the previously recorded point
        i = 1
        fenxing_type = []  # fractal type of each point: 1 for a top, -1 for a bottom
        fenxing_data = pd.DataFrame()  # DataFrame rows of the fractal points
while (i < len(after_fenxing) - 1):
case1 = after_fenxing.high[i - 1] < after_fenxing.high[i] and after_fenxing.high[i] > after_fenxing.high[i + 1] # 顶分型
case2 = after_fenxing.low[i - 1] > after_fenxing.low[i] and after_fenxing.low[i] < after_fenxing.low[i + 1] # 底分型
if case1:
if temp_type == 1: # 如果上一个分型为顶分型,则进行比较,选取高点更高的分型
if after_fenxing.high[i] <= temp_high:
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif temp_type == 2: # 如果上一个分型为底分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_low >= after_fenxing.high[i]: # 如果上一个底分型的底比当前顶分型的顶高,则跳过当前顶分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(-1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif case2:
if temp_type == 2: # 如果上一个分型为底分型,则进行比较,选取低点更低的分型
if after_fenxing.low[i] >= temp_low:
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
elif temp_type == 1: # 如果上一个分型为顶分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型
if temp_high <= after_fenxing.low[i]: # 如果上一个顶分型的底比当前底分型的底低,则跳过当前底分型。
i += 1
elif i < temp_num + 4: # 顶和底至少5k线
i += 1
else:
fenxing_type.append(1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
i += 1
# if fenxing_type:
# if fenxing_type[-1] == 1 and temp_type == 2:
# fenxing_type.append(-1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
#
# if fenxing_type[-1] == -1 and temp_type == 1:
# fenxing_type.append(1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
return fenxing_data, fenxing_type
# ----------------------------------------------------------------------
    # connect to the MongoDB database
def __connectMongo(self):
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
except ConnectionFailure:
pass
# ----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.signal.connect(self.updateChanlunLog)
self.eventEngine.register(EVENT_CHANLUN_LOG, self.signal.emit)
########################################################################
class PriceWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
symbol = ''
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (n, t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(n, min), QtCore.QPointF(n, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(n-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
            ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, data, parent=None):
"""Constructor"""
super(PriceWidget, self).__init__(parent)
        # Parameters and variables for the K-line chart EMA lines
        self.EMAFastAlpha = 0.0167  # fast EMA parameter, 60 periods
        self.EMASlowAlpha = 0.0083  # slow EMA parameter, 120 periods
        self.fastEMA = 0  # current value of the fast EMA
        self.slowEMA = 0  # current value of the slow EMA
self.listfastEMA = []
self.listslowEMA = []
        # Lists holding the K-line data
self.listBar = []
self.listClose = []
self.listHigh = []
self.listLow = []
self.listOpen = []
        # Whether loading of historical data has completed
self.initCompleted = False
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
        self.data = data  # data needed for plotting
        # MongoDB related
self.__mongoConnected = False
self.__mongoConnection = None
        # Initialization calls
self.__connectMongo()
self.initUi()
# self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Price')
self.vbl_1 = QtGui.QHBoxLayout()
        self.initplotKline()  # initialize plotKline
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotKline(self):
"""Kline"""
        s = self.data.index  # x-axis values
        print "number of K-lines: ", len(s)
xdict = dict(enumerate(s))
self.__axisTime = MyStringAxis(xdict, orientation='bottom')
        self.pw2 = pg.PlotWidget(axisItems={'bottom': self.__axisTime})  # K-line chart
        pw2x = self.pw2.getAxis('bottom')
        pw2x.setGrid(150)  # set the default x-axis grid
        pw2y = self.pw2.getAxis('left')
        pw2y.setGrid(150)  # set the default y-axis grid
self.vbl_1.addWidget(self.pw2)
self.pw2.setMinimumWidth(1500)
self.pw2.setMaximumWidth(1800)
self.pw2.setDownsampling(mode='peak')
self.pw2.setClipToView(True)
self.curve5 = self.pw2.plot()
self.curve6 = self.pw2.plot()
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
## Draw an arrowhead next to the text box
# self.arrow = pg.ArrowItem()
# self.pw2.addItem(self.arrow)
    # Read one-minute bars from the database and plot the minute chart
def plotMin(self, symbol):
self.initCompleted = True
cx = self.__mongoMinDB[symbol].find()
print cx.count()
if cx:
for data in cx:
self.barOpen = data['open']
self.barClose = data['close']
self.barLow = data['low']
self.barHigh = data['high']
self.barOpenInterest = data['openInterest']
# print self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest
self.onBar(self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest)
self.num += 1
    # Plot the K-line chart from historical data
def plotHistorticData(self):
self.initCompleted = True
for i in xrange(len(self.data)):
self.onBar(i, self.data.index[i], self.data.open[i], self.data.close[i], self.data.low[i], self.data.high[i])
self.plotKline()
print "plotKLine success"
#----------------------------------------------------------------------
def initHistoricalData(self):
"""初始历史数据"""
if self.symbol!='':
print "download histrical data:",self.symbol
self.initCompleted = True # 读取历史数据完成
td = timedelta(days=1) # 读取3天的历史TICK数据
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
print cx.count()
if cx:
for data in cx:
tick = Tick(data['symbol'])
tick.openPrice = data['lastPrice']
tick.highPrice = data['upperLimit']
tick.lowPrice = data['lowerLimit']
tick.lastPrice = data['lastPrice']
tick.volume = data['volume']
tick.openInterest = data['openInterest']
tick.upperLimit = data['upperLimit']
tick.lowerLimit = data['lowerLimit']
tick.time = data['time']
# tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['bidPrice1']
tick.bidPrice2 = data['bidPrice2']
tick.bidPrice3 = data['bidPrice3']
tick.bidPrice4 = data['bidPrice4']
tick.bidPrice5 = data['bidPrice5']
tick.askPrice1 = data['askPrice1']
tick.askPrice2 = data['askPrice2']
tick.askPrice3 = data['askPrice3']
tick.askPrice4 = data['askPrice4']
tick.askPrice5 = data['askPrice5']
tick.bidVolume1 = data['bidVolume1']
tick.bidVolume2 = data['bidVolume2']
tick.bidVolume3 = data['bidVolume3']
tick.bidVolume4 = data['bidVolume4']
tick.bidVolume5 = data['bidVolume5']
tick.askVolume1 = data['askVolume1']
tick.askVolume2 = data['askVolume2']
tick.askVolume3 = data['askVolume3']
tick.askVolume4 = data['askVolume4']
tick.askVolume5 = data['askVolume5']
self.onTick(tick)
print('load historic data completed')
#----------------------------------------------------------------------
def plotKline(self):
"""K线图"""
if self.initCompleted:
# 均线
self.curve5.setData(self.listfastEMA, pen=(255, 0, 0), name="Red curve")
self.curve6.setData(self.listslowEMA, pen=(0, 255, 0), name="Green curve")
# 画K线
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def plotText(self):
lenClose = len(self.listClose)
if lenClose >= 5: # Fractal Signal
if self.listClose[-1] > self.listClose[-2] and self.listClose[-3] > self.listClose[-2] and self.listClose[-4] > self.listClose[-2] and self.listClose[-5] > self.listClose[-2] and self.listfastEMA[-1] > self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
                self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listLow[-1]), angle=90, brush=(255, 0, 0))  # red
self.pw2.addItem(self.arrow)
elif self.listClose[-1] < self.listClose[-2] and self.listClose[-3] < self.listClose[-2] and self.listClose[-4] < self.listClose[-2] and self.listClose[-5] < self.listClose[-2] and self.listfastEMA[-1] < self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
                self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listHigh[-1]), angle=-90, brush=(0, 255, 0))  # green
self.pw2.addItem(self.arrow)
#----------------------------------------------------------------------
def onBar(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
self.listOpen.append(o)
self.listClose.append(c)
self.listHigh.append(h)
self.listLow.append(l)
        # Compute the EMA lines for the K-line chart
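        # Standard exponential smoothing, e.g. for the fast line:
        #   fastEMA_t = EMAFastAlpha * close_t + (1 - EMAFastAlpha) * fastEMA_(t-1),
        # seeded with the first close price.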
if self.fastEMA:
self.fastEMA = c*self.EMAFastAlpha + self.fastEMA*(1-self.EMAFastAlpha)
self.slowEMA = c*self.EMASlowAlpha + self.slowEMA*(1-self.EMASlowAlpha)
else:
self.fastEMA = c
self.slowEMA = c
self.listfastEMA.append(self.fastEMA)
self.listslowEMA.append(self.slowEMA)
        self.plotText()  # show entry positions
# ----------------------------------------------------------------------
    # Append a merged K-line bar
def onBarAfterFenXing(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
def plotKlineAfterFenXing(self):
        # draw the K-lines
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoMinDB = self.__mongoConnection['VnTrader_1Min_Db']
except ConnectionFailure:
pass
########################################################################
class TickWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
# tick图的相关参数、变量
listlastPrice = np.empty(1000)
fastMA = 0
midMA = 0
slowMA = 0
listfastMA = np.empty(1000)
listmidMA = np.empty(1000)
listslowMA = np.empty(1000)
    tickFastAlpha = 0.0333  # fast MA parameter, 30 periods
    tickMidAlpha = 0.0167  # mid MA parameter, 60 periods
    tickSlowAlpha = 0.0083  # slow MA parameter, 120 periods
    ptr = 0
    ticktime = None  # time of the tick data
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
a = pg.AxisItem('bottom', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True)
a.setFixedWidth(1)
a.setWidth(1)
a.setLabel(show=True)
a.setGrid(grid=True)
labelStyle = {'color': '#FFF', 'font-size': '14pt'}
a.setLabel('label text', units='V', **labelStyle)
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
            ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, parent=None):
"""Constructor"""
super(TickWidget, self).__init__(parent)
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
        # MongoDB related
self.__mongoConnected = False
self.__mongoConnection = None
self.__mongoTickDB = None
        # Initialization calls
self.initUi()
self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Tick')
self.vbl_1 = QtGui.QHBoxLayout()
        self.initplotTick()  # initialize plotTick
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotTick(self):
""""""
self.pw1 = pg.PlotWidget(name='Plot1')
self.vbl_1.addWidget(self.pw1)
self.pw1.setMinimumWidth(1500)
self.pw1.setMaximumWidth(1800)
self.pw1.setRange(xRange=[-360, 0])
self.pw1.setLimits(xMax=5)
self.pw1.setDownsampling(mode='peak')
self.pw1.setClipToView(True)
self.curve1 = self.pw1.plot()
self.curve2 = self.pw1.plot()
self.curve3 = self.pw1.plot()
self.curve4 = self.pw1.plot()
# #----------------------------------------------------------------------
# def initHistoricalData(self,startDate=None):
# """初始历史数据"""
# print "download histrical data"
# self.initCompleted = True # 读取历史数据完成
# td = timedelta(days=1) # 读取3天的历史TICK数据
#
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
#
# print cx.count()
#
# if cx:
# for data in cx:
# tick = Tick(data['symbol'])
#
# tick.openPrice = data['lastPrice']
# tick.highPrice = data['upperLimit']
# tick.lowPrice = data['lowerLimit']
# tick.lastPrice = data['lastPrice']
#
# tick.volume = data['volume']
# tick.openInterest = data['openInterest']
#
# tick.upperLimit = data['upperLimit']
# tick.lowerLimit = data['lowerLimit']
#
# tick.time = data['time']
# # tick.ms = data['UpdateMillisec']
#
# tick.bidPrice1 = data['bidPrice1']
# tick.bidPrice2 = data['bidPrice2']
# tick.bidPrice3 = data['bidPrice3']
# tick.bidPrice4 = data['bidPrice4']
# tick.bidPrice5 = data['bidPrice5']
#
# tick.askPrice1 = data['askPrice1']
# tick.askPrice2 = data['askPrice2']
# tick.askPrice3 = data['askPrice3']
# tick.askPrice4 = data['askPrice4']
# tick.askPrice5 = data['askPrice5']
#
# tick.bidVolume1 = data['bidVolume1']
# tick.bidVolume2 = data['bidVolume2']
# tick.bidVolume3 = data['bidVolume3']
# tick.bidVolume4 = data['bidVolume4']
# tick.bidVolume5 = data['bidVolume5']
#
# tick.askVolume1 = data['askVolume1']
# tick.askVolume2 = data['askVolume2']
# tick.askVolume3 = data['askVolume3']
# tick.askVolume4 = data['askVolume4']
# tick.askVolume5 = data['askVolume5']
#
# self.onTick(tick)
#
# print('load historic data completed')
#----------------------------------------------------------------------
def plotTick(self):
"""画tick图"""
self.curve1.setData(self.listlastPrice[:self.ptr])
self.curve2.setData(self.listfastMA[:self.ptr], pen=(255, 0, 0), name="Red curve")
self.curve3.setData(self.listmidMA[:self.ptr], pen=(0, 255, 0), name="Green curve")
self.curve4.setData(self.listslowMA[:self.ptr], pen=(0, 0, 255), name="Blue curve")
self.curve1.setPos(-self.ptr, 0)
self.curve2.setPos(-self.ptr, 0)
self.curve3.setPos(-self.ptr, 0)
self.curve4.setPos(-self.ptr, 0)
#----------------------------------------------------------------------
def updateMarketData(self, event):
"""更新行情"""
data = event.dict_['data']
print "update", data['InstrumentID']
symbol = data['InstrumentID']
tick = Tick(symbol)
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
tick.time = data['UpdateTime']
tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['BidPrice1']
tick.bidPrice2 = data['BidPrice2']
tick.bidPrice3 = data['BidPrice3']
tick.bidPrice4 = data['BidPrice4']
tick.bidPrice5 = data['BidPrice5']
tick.askPrice1 = data['AskPrice1']
tick.askPrice2 = data['AskPrice2']
tick.askPrice3 = data['AskPrice3']
tick.askPrice4 = data['AskPrice4']
tick.askPrice5 = data['AskPrice5']
tick.bidVolume1 = data['BidVolume1']
tick.bidVolume2 = data['BidVolume2']
tick.bidVolume3 = data['BidVolume3']
tick.bidVolume4 = data['BidVolume4']
tick.bidVolume5 = data['BidVolume5']
tick.askVolume1 = data['AskVolume1']
tick.askVolume2 = data['AskVolume2']
tick.askVolume3 = data['AskVolume3']
tick.askVolume4 = data['AskVolume4']
tick.askVolume5 = data['AskVolume5']
        self.onTick(tick)  # update tick data
        self.__recordTick(tick)  # record the tick data
#----------------------------------------------------------------------
def onTick(self, tick):
"""tick数据更新"""
from datetime import time
# 首先生成datetime.time格式的时间(便于比较),从字符串时间转化为time格式的时间
hh, mm, ss = tick.time.split(':')
self.ticktime = time(int(hh), int(mm), int(ss), microsecond=tick.ms)
# 计算tick图的相关参数
if self.ptr == 0:
self.fastMA = tick.lastPrice
self.midMA = tick.lastPrice
self.slowMA = tick.lastPrice
else:
self.fastMA = (1-self.tickFastAlpha) * self.fastMA + self.tickFastAlpha * tick.lastPrice
self.midMA = (1-self.tickMidAlpha) * self.midMA + self.tickMidAlpha * tick.lastPrice
self.slowMA = (1-self.tickSlowAlpha) * self.slowMA + self.tickSlowAlpha * tick.lastPrice
self.listlastPrice[self.ptr] = int(tick.lastPrice)
self.listfastMA[self.ptr] = int(self.fastMA)
self.listmidMA[self.ptr] = int(self.midMA)
self.listslowMA[self.ptr] = int(self.slowMA)
self.ptr += 1
print(self.ptr)
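        # The price/MA buffers are preallocated numpy arrays; when full they are
        # doubled in size and the old contents copied over, which keeps appends
        # cheap without reallocating on every tick.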
if self.ptr >= self.listlastPrice.shape[0]:
tmp = self.listlastPrice
self.listlastPrice = np.empty(self.listlastPrice.shape[0] * 2)
self.listlastPrice[:tmp.shape[0]] = tmp
tmp = self.listfastMA
self.listfastMA = np.empty(self.listfastMA.shape[0] * 2)
self.listfastMA[:tmp.shape[0]] = tmp
tmp = self.listmidMA
self.listmidMA = np.empty(self.listmidMA.shape[0] * 2)
self.listmidMA[:tmp.shape[0]] = tmp
tmp = self.listslowMA
self.listslowMA = np.empty(self.listslowMA.shape[0] * 2)
self.listslowMA[:tmp.shape[0]] = tmp
        # Call the plotting function
        self.plotTick()  # tick chart
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoTickDB = self.__mongoConnection['VnTrader_Tick_Db']
except ConnectionFailure:
pass
#----------------------------------------------------------------------
def __recordTick(self, data):
"""将Tick数据插入到MongoDB中"""
if self.__mongoConnected:
symbol = data['InstrumentID']
data['date'] = datetime.now().strftime('%Y%m%d')
self.__mongoTickDB[symbol].insert(data)
# #----------------------------------------------------------------------
# def loadTick(self, symbol, startDate, endDate=None):
# """从MongoDB中读取Tick数据"""
# cx = self.__mongoTickDB[symbol].find()
# print cx.count()
# return cx
# # if self.__mongoConnected:
# # collection = self.__mongoTickDB[symbol]
# #
# # # 如果输入了读取TICK的最后日期
# # if endDate:
# # cx = collection.find({'date': {'$gte': startDate, '$lte': endDate}})
# # else:
# # cx = collection.find({'date': {'$gte': startDate}})
# # return cx
# # else:
# # return None
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
print "connect"
self.signal.connect(self.updateMarketData)
self.__eventEngine.register(EVENT_MARKETDATA, self.signal.emit)
class Tick:
"""Tick数据对象"""
#----------------------------------------------------------------------
def __init__(self, symbol):
"""Constructor"""
        self.symbol = symbol  # instrument (contract) code
self.openPrice = 0 # OHLC
self.highPrice = 0
self.lowPrice = 0
self.lastPrice = 0
        self.volume = 0  # traded volume
        self.openInterest = 0  # open interest
        self.upperLimit = 0  # upper price limit
        self.lowerLimit = 0  # lower price limit
        self.time = ''  # update time and milliseconds
        self.ms = 0
        self.bidPrice1 = 0  # market depth
self.bidPrice2 = 0
self.bidPrice3 = 0
self.bidPrice4 = 0
self.bidPrice5 = 0
self.askPrice1 = 0
self.askPrice2 = 0
self.askPrice3 = 0
self.askPrice4 = 0
self.askPrice5 = 0
self.bidVolume1 = 0
self.bidVolume2 = 0
self.bidVolume3 = 0
self.bidVolume4 = 0
self.bidVolume5 = 0
self.askVolume1 = 0
self.askVolume2 = 0
self.askVolume3 = 0
self.askVolume4 = 0
self.askVolume5 = 0 | mit |
bgris/ODL_bgris | lib/python3.5/site-packages/odl/util/graphics.py | 1 | 15419 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Functions for graphical output."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
from odl.util.testutils import run_doctests
from odl.util.utility import is_real_dtype
__all__ = ('show_discrete_data',)
def _safe_minmax(values):
"""Calculate min and max of array with guards for nan and inf."""
# Nan and inf guarded min and max
minval = np.min(values[np.isfinite(values)])
maxval = np.max(values[np.isfinite(values)])
return minval, maxval
def _colorbar_ticks(minval, maxval):
"""Return the ticks (values show) in the colorbar."""
return [minval, (maxval + minval) / 2., maxval]
def _digits(minval, maxval):
"""Digits needed to comforatbly display values in [minval, maxval]"""
if minval == maxval:
return 3
else:
return min(10, max(2, int(1 + abs(np.log10(maxval - minval)))))
def _colorbar_format(minval, maxval):
"""Return the format string for the colorbar."""
return '%.{}f'.format(_digits(minval, maxval))
def _axes_info(grid, npoints=5):
result = []
min_pt = grid.min()
max_pt = grid.max()
for axis in range(grid.ndim):
xmin = min_pt[axis]
xmax = max_pt[axis]
points = np.linspace(xmin, xmax, npoints)
indices = np.linspace(0, grid.shape[axis] - 1, npoints, dtype=int)
tick_values = grid.coord_vectors[axis][indices]
# Do not use corner point in case of a partition, use outer corner
tick_values[[0, -1]] = xmin, xmax
format_str = '{:.' + str(_digits(xmin, xmax)) + 'f}'
tick_labels = [format_str.format(f) for f in tick_values]
result += [(points, tick_labels)]
return result
def show_discrete_data(values, grid, title=None, method='',
force_show=False, fig=None, **kwargs):
"""Display a discrete 1d or 2d function.
Parameters
----------
values : `numpy.ndarray`
The values to visualize
grid : `TensorGrid` or `RectPartition`
Grid of the values
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
'plot' : graph plot
'scatter' : scattered 2d points
(2nd axis <-> value)
2d methods:
'imshow' : image plot with coloring according to value,
including a colorbar.
'scatter' : cloud of scattered 3d points
(3rd axis <-> value)
'wireframe', 'plot_wireframe' : surface plot
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
        later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
        The figure to show in. Expected to be of the same "style" as the figure
        given by this function. The most common use case is that fig is the
        return value from an earlier call to this function.
Default: New figure
interp : {'nearest', 'linear'}, optional
Interpolation method to use.
Default: 'nearest'
axis_labels : string, optional
Axis labels, default: ['x', 'y']
update_in_place : bool, optional
Update the content of the figure in place. Intended for faster real
time plotting, typically ~5 times faster.
This is only performed for ``method == 'imshow'`` with real data and
``fig != None``. Otherwise this parameter is treated as False.
Default: False
axis_fontsize : int, optional
Fontsize for the axes. Default: 16
kwargs : {'figsize', 'saveto', ...}
Extra keyword arguments passed on to display method
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
matplotlib.pyplot.plot : Show graph plot
matplotlib.pyplot.imshow : Show data as image
matplotlib.pyplot.scatter : Show scattered 3d points
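
    Examples
    --------
    A minimal usage sketch (illustrative only; the exact way the space and its
    grid are constructed may differ in your ODL setup)::

        import numpy as np
        import odl

        space = odl.uniform_discr([0, 0], [1, 1], (100, 100))
        values = np.random.rand(100, 100)
        fig = show_discrete_data(values, space.grid, method='imshow',
                                 force_show=True)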
"""
# Importing pyplot takes ~2 sec, only import when needed.
import matplotlib.pyplot as plt
args_re = []
args_im = []
dsp_kwargs = {}
sub_kwargs = {}
    arrange_subplots = (121, 122)  # horizontal arrangement
# Create axis labels which remember their original meaning
axis_labels = kwargs.pop('axis_labels', ['x', 'y'])
values_are_complex = not is_real_dtype(values.dtype)
figsize = kwargs.pop('figsize', None)
saveto = kwargs.pop('saveto', None)
interp = kwargs.pop('interp', 'nearest')
axis_fontsize = kwargs.pop('axis_fontsize', 16)
# Check if we should and can update the plot in place
update_in_place = kwargs.pop('update_in_place', False)
if (update_in_place and
(fig is None or values_are_complex or values.ndim != 2 or
(values.ndim == 2 and method not in ('', 'imshow')))):
update_in_place = False
if values.ndim == 1: # TODO: maybe a plotter class would be better
if not method:
if interp == 'nearest':
method = 'step'
dsp_kwargs['where'] = 'mid'
elif interp == 'linear':
method = 'plot'
else:
method = 'plot'
if method == 'plot' or method == 'step' or method == 'scatter':
args_re += [grid.coord_vectors[0], values.real]
args_im += [grid.coord_vectors[0], values.imag]
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
elif values.ndim == 2:
if not method:
method = 'imshow'
if method == 'imshow':
args_re = [np.rot90(values.real)]
args_im = [np.rot90(values.imag)] if values_are_complex else []
extent = [grid.min()[0], grid.max()[0],
grid.min()[1], grid.max()[1]]
if interp == 'nearest':
interpolation = 'nearest'
elif interp == 'linear':
interpolation = 'bilinear'
else:
interpolation = 'none'
dsp_kwargs.update({'interpolation': interpolation,
'cmap': 'bone',
'extent': extent,
'aspect': 'auto'})
elif method == 'scatter':
pts = grid.points()
args_re = [pts[:, 0], pts[:, 1], values.ravel().real]
args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag]
if values_are_complex else [])
sub_kwargs.update({'projection': '3d'})
elif method in ('wireframe', 'plot_wireframe'):
method = 'plot_wireframe'
x, y = grid.meshgrid
args_re = [x, y, np.rot90(values.real)]
args_im = ([x, y, np.rot90(values.imag)] if values_are_complex
else [])
sub_kwargs.update({'projection': '3d'})
else:
raise ValueError('`method` {!r} not supported'
''.format(method))
else:
raise NotImplementedError('no method for {}d display implemented'
''.format(values.ndim))
# Additional keyword args are passed on to the display method
dsp_kwargs.update(**kwargs)
if fig is not None:
# Reuse figure if given as input
if not isinstance(fig, plt.Figure):
raise TypeError('`fig` {} not a matplotlib figure'.format(fig))
if not plt.fignum_exists(fig.number):
# If figure does not exist, user either closed the figure or
# is using IPython, in this case we need a new figure.
fig = plt.figure(figsize=figsize)
updatefig = False
else:
# Set current figure to given input
fig = plt.figure(fig.number)
updatefig = True
if values.ndim > 1 and not update_in_place:
            # If the figure is larger than 1d, we can clear it since we
            # don't reuse anything. Keeping it causes performance problems.
fig.clf()
else:
fig = plt.figure(figsize=figsize)
updatefig = False
if values_are_complex:
# Real
if len(fig.axes) == 0:
# Create new axis if needed
sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs)
sub_re.set_title('Real part')
sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_re.set_ylabel('value')
else:
sub_re = fig.axes[0]
display_re = getattr(sub_re, method)
csub_re = display_re(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 2:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_re, maxval_re = _safe_minmax(values.real)
else:
minval_re, maxval_re = kwargs['clim']
ticks_re = _colorbar_ticks(minval_re, maxval_re)
format_re = _colorbar_format(minval_re, maxval_re)
plt.colorbar(csub_re, orientation='horizontal',
ticks=ticks_re, format=format_re)
# Imaginary
if len(fig.axes) < 3:
sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs)
sub_im.set_title('Imaginary part')
sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub_im.set_ylabel('value')
else:
sub_im = fig.axes[2]
display_im = getattr(sub_im, method)
csub_im = display_im(*args_im, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow' and len(fig.axes) < 4:
# Create colorbar if none seems to exist
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval_im, maxval_im = _safe_minmax(values.imag)
else:
minval_im, maxval_im = kwargs['clim']
ticks_im = _colorbar_ticks(minval_im, maxval_im)
format_im = _colorbar_format(minval_im, maxval_im)
plt.colorbar(csub_im, orientation='horizontal',
ticks=ticks_im, format=format_im)
else:
if len(fig.axes) == 0:
# Create new axis object if needed
sub = plt.subplot(111, **sub_kwargs)
sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize)
if values.ndim == 2:
sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize)
else:
sub.set_ylabel('value')
try:
# For 3d plots
sub.set_zlabel('z')
except AttributeError:
pass
else:
sub = fig.axes[0]
if update_in_place:
import matplotlib as mpl
imgs = [obj for obj in sub.get_children()
if isinstance(obj, mpl.image.AxesImage)]
if len(imgs) > 0 and updatefig:
imgs[0].set_data(args_re[0])
csub = imgs[0]
# Update min-max
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
csub.set_clim(minval, maxval)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
else:
display = getattr(sub, method)
csub = display(*args_re, **dsp_kwargs)
# Axis ticks
if method == 'imshow' and not grid.is_uniform:
(xpts, xlabels), (ypts, ylabels) = _axes_info(grid)
plt.xticks(xpts, xlabels)
plt.yticks(ypts, ylabels)
if method == 'imshow':
# Add colorbar
# Use clim from kwargs if given
if 'clim' not in kwargs:
minval, maxval = _safe_minmax(values)
else:
minval, maxval = kwargs['clim']
ticks = _colorbar_ticks(minval, maxval)
format = _colorbar_format(minval, maxval)
if len(fig.axes) < 2:
# Create colorbar if none seems to exist
plt.colorbar(mappable=csub, ticks=ticks, format=format)
elif update_in_place:
# If it exists and we should update it
csub.colorbar.set_clim(minval, maxval)
csub.colorbar.set_ticks(ticks)
csub.colorbar.set_ticklabels([format % tick for tick in ticks])
csub.colorbar.draw_all()
# Fixes overlapping stuff at the expense of potentially squashed subplots
if not update_in_place:
fig.tight_layout()
if title is not None:
if not values_are_complex:
# Do not overwrite title for complex values
plt.title(title)
fig.canvas.manager.set_window_title(title)
if updatefig or plt.isinteractive():
# If we are running in interactive mode, we can always show the fig
# This causes an artifact, where users of `CallbackShow` without
# interactive mode only shows the figure after the second iteration.
plt.show(block=False)
if not update_in_place:
plt.draw()
plt.pause(0.0001)
else:
try:
sub.draw_artist(csub)
fig.canvas.blit(fig.bbox)
fig.canvas.update()
fig.canvas.flush_events()
except AttributeError:
plt.draw()
plt.pause(0.0001)
if force_show:
plt.show()
if saveto is not None:
fig.savefig(saveto)
return fig
if __name__ == '__main__':
run_doctests()
| gpl-3.0 |
emoronayuso/beeton | asterisk-bee/asteriskbee/api_status/scripts_graficas/recoge_marcas_graficas.py | 1 | 2307 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
#import calendar
from datetime import datetime
from django.conf import settings
settings.configure()
import os
# Connection to the beeton (asteriskbee) databases
import sqlite3 as dbapi
## Application directory
### STATIC_ROOT = '/var/www/asterisk-bee/asteriskbee/'
#directorio = settings.STATIC_ROOT+"api_status/"
directorio = "/var/www/asterisk-bee/asteriskbee/api_status/"
## Maximum number of rows kept per graph
num_cpu_dia = 20
def recoge_marcas():
    # Connection to the statistics database
bbdd = dbapi.connect(directorio+"bbdd/estadisticas.db")
cursor = bbdd.cursor()
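    # What the shell pipeline below does, roughly: list every process with its
    # %CPU, drop those at 0.0, trim whitespace, exclude this collector script
    # itself and keep only the %CPU column in temp_cpu_dia2; the loop further
    # down then sums those values into a single CPU-usage sample.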
os.system("ps -e -o pcpu,cpu,nice,state,cputime,args --sort pcpu | sed '/^ 0.0 /d' > "+directorio+"scripts_graficas/temp/temp_cpu_dia; cat "+directorio+"scripts_graficas/temp/temp_cpu_dia | sed 's/^[ \t]*//;s/[ \t]*$//' | grep -v 'recoge_marcas_graficas.py' | cut -d ' ' -f 1 > "+directorio+"scripts_graficas/temp/temp_cpu_dia2")
total = 0.0
f = open(directorio+'scripts_graficas/temp/temp_cpu_dia2','r')
    ## Read the first line to skip the header
linea = f.readline()
while True:
linea = f.readline()
if not linea:
break
        # Exclude the CPU usage of the script that collects the samples
else:
total = total + float(linea)
f.close()
res = total
# print str(res)
    # Build the query, ordered by date
con_ordenada = """select * from api_status_marcas_graficas where tipo='cpu_dia' order by fecha_hora;"""
cursor.execute(con_ordenada)
p = cursor.fetchall()
if len(p) < num_cpu_dia:
        # insert into the database
insert = "insert into api_status_marcas_graficas (tipo,valor) values ('cpu_dia',?);"
cursor.execute(insert ,(res,))
bbdd.commit()
else:
        # Order by date, remove the oldest record and insert the new one
# strftime('%d-%m-%Y %H:%M',calldate)
hora_actual = datetime.now()
con_update = " update api_status_marcas_graficas set fecha_hora=datetime(?),valor=? where id=?; "
# print "Antes del update, hora_actual->"+str(hora_actual)+"valor->"+str(res)+ " id->"+str(p[0][0])
cursor.execute(con_update ,(hora_actual,res,p[0][0]))
bbdd.commit()
    ## Close the database connection
cursor.close()
bbdd.close()
if __name__ == "__main__":
recoge_marcas()
| gpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_inst/padova_inst_0/fullgrid/UV1.py | 31 | 9315 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
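#in other words each entry becomes log10(4860 * F(line)/F(reference line in column 57,
#the 4860A one)) when that ratio is positive; otherwise it stays at its initial value of 0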
for i in range(len(Emissionlines)):
	for j in range(len(Emissionlines[0])):
		if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
			concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
		else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [0, #977
1, #991
2, #1026
5, #1216
91, #1218
6, #1239
7, #1240
8, #1243
9, #1263
10, #1304
11,#1308
12, #1397
13, #1402
14, #1406
16, #1486
17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | native_client/buildbot/buildbot_selector.py | 1 | 18629 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
python = sys.executable
bash = '/bin/bash'
echo = 'echo'
BOT_ASSIGNMENT = {
######################################################################
# Buildbots.
######################################################################
'xp-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'xp-bare-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-bare-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'precise-64-validator-opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
# Clang.
'precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'mac10.7-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# PNaCl.
'oneiric_32-newlib-arm_hw-pnacl-panda-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-dbg',
'oneiric_32-newlib-arm_hw-pnacl-panda-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-opt',
'precise_64-newlib-arm_qemu-pnacl-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-dbg',
'precise_64-newlib-arm_qemu-pnacl-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-opt',
'precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'mac10.8-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'win7-64-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
# PNaCl Spec
'precise_64-newlib-arm_qemu-pnacl-buildonly-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-buildonly',
'oneiric_32-newlib-arm_hw-pnacl-panda-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-hw',
'lucid_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8632',
'lucid_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8664',
# NaCl Spec
'lucid_64-newlib-x86_32-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8632',
'lucid_64-newlib-x86_64-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8664',
# Android bots.
'precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Valgrind bots.
'precise-64-newlib-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
'precise-64-glibc-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
# Coverage.
'mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'xp-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
######################################################################
# Trybots.
######################################################################
'nacl-precise64_validator_opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
'nacl-precise64_newlib_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh newlib',
'nacl-precise64_glibc_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh glibc',
# Android trybots.
'nacl-precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'nacl-precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Coverage trybots.
'nacl-mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'nacl-precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-win32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
# Clang trybots.
'nacl-precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'nacl-mac10.6-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'nacl-precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'nacl-mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# Pnacl main trybots
'nacl-precise_64-newlib-arm_qemu-pnacl':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu arm',
'nacl-precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'nacl-precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
'nacl-arm_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-try',
'nacl-arm_hw_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-try',
'nacl-mac10.8_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-win7_64_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
# Pnacl spec2k trybots
'nacl-precise_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8632',
'nacl-precise_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8664',
'nacl-arm_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-buildonly',
'nacl-arm_hw_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-hw',
# Toolchain glibc.
'precise64-glibc': bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'mac-glibc': bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'win7-glibc': 'buildbot\\buildbot_windows-glibc-makefile.bat',
# Toolchain newlib x86.
'win7-toolchain_x86': 'buildbot\\buildbot_toolchain_win.bat',
'mac-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh mac',
'precise64-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh linux',
# Toolchain newlib arm.
'win7-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'mac-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'precise64-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' --test_toolchain nacl_arm_newlib'
' toolchain_build',
# BIONIC toolchain builders.
'precise64-toolchain_bionic':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build_bionic',
# Pnacl toolchain builders.
'linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-32',
'linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-64',
'mac-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
'win-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
# Pnacl toolchain testers
'linux-pnacl-x86_64-tests-x86_64':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-64',
'linux-pnacl-x86_64-tests-x86_32':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-32',
'linux-pnacl-x86_64-tests-arm':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot arm',
# MIPS toolchain buildbot.
'linux-pnacl-x86_32-tests-mips':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu mips32',
# Toolchain trybots.
'nacl-toolchain-precise64-newlib':
bash + ' buildbot/buildbot_toolchain.sh linux',
'nacl-toolchain-mac-newlib': bash + ' buildbot/buildbot_toolchain.sh mac',
'nacl-toolchain-win7-newlib': 'buildbot\\buildbot_toolchain_win.bat',
'nacl-toolchain-precise64-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' --test_toolchain nacl_arm_newlib'
' toolchain_build',
'nacl-toolchain-mac-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-win7-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-precise64-glibc':
bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'nacl-toolchain-mac-glibc':
bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'nacl-toolchain-win7-glibc':
'buildbot\\buildbot_windows-glibc-makefile.bat',
# Pnacl toolchain trybots.
'nacl-toolchain-linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-32',
'nacl-toolchain-linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64',
'nacl-toolchain-linux-pnacl-mips': echo + ' "TODO(mseaborn)"',
'nacl-toolchain-mac-pnacl-x86_32':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
'nacl-toolchain-win7-pnacl-x86_64':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
# Sanitizer Pnacl toolchain trybots.
'nacl-toolchain-asan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize address --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize memory not understood.
'nacl-toolchain-msan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize memory --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize thread not understood.
'nacl-toolchain-tsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize thread --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize undefined not understood.
'nacl-toolchain-ubsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize undefined --skip-tests',
}
special_for_arm = [
'win7_64',
'win7-64',
'lucid-64',
'lucid64',
'precise-64',
'precise64'
]
for platform in [
'vista', 'win7', 'win8', 'win',
'mac10.6', 'mac10.7', 'mac10.8',
'lucid', 'precise'] + special_for_arm:
if platform in special_for_arm:
arch_variants = ['arm']
else:
arch_variants = ['', '32', '64', 'arm']
for arch in arch_variants:
arch_flags = ''
real_arch = arch
arch_part = '-' + arch
# Disable GYP build for win32 bots and arm cross-builders. In this case
# "win" means Windows XP, not Vista, Windows 7, etc.
#
# Building via GYP always builds all toolchains by default, but the win32
# XP pnacl builds are pathologically slow (e.g. ~38 seconds per compile on
# the nacl-win32_glibc_opt trybot). There are other builders that test
# Windows builds via gyp, so the reduced test coverage should be slight.
if arch == 'arm' or (platform == 'win' and arch == '32'):
arch_flags += ' --no-gyp'
if platform == 'win7' and arch == '32':
arch_flags += ' --no-goma'
if arch == '':
arch_part = ''
real_arch = '32'
# Test with Breakpad tools only on basic Linux builds.
if sys.platform.startswith('linux'):
arch_flags += ' --use-breakpad-tools'
for mode in ['dbg', 'opt']:
for libc in ['newlib', 'glibc']:
# Buildbots.
for bare in ['', '-bare']:
name = platform + arch_part + bare + '-' + libc + '-' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
# Trybots
for arch_sep in ['', '-', '_']:
name = 'nacl-' + platform + arch_sep + arch + '_' + libc + '_' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
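# A couple of illustrative examples of what the loops above generate (the names
# and commands are derived mechanically, so this is not an exhaustive list):
#   'precise-64-newlib-opt'  -> buildbot_standard.py opt 64 newlib [+ arch flags]
#   'nacl-mac10.7_glibc_dbg' -> buildbot_standard.py dbg 32 glibc [+ arch flags]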
def EscapeJson(data):
return '"' + json.dumps(data).replace('"', r'\"') + '"'
def HasNoPerfResults(builder):
if 'pnacl-buildonly-spec' in builder:
return True
return builder in [
'mac-toolchain_arm',
'win-pnacl-x86_32',
'linux-pnacl-x86_32-tests-mips',
'precise64-toolchain_bionic',
]
def Main():
builder = os.environ.get('BUILDBOT_BUILDERNAME')
build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
build_revision = os.environ.get('BUILDBOT_GOT_REVISION',
os.environ.get('BUILDBOT_REVISION'))
slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
cmd = BOT_ASSIGNMENT.get(builder)
if not cmd:
sys.stderr.write('ERROR - unset/invalid builder name\n')
sys.exit(1)
env = os.environ.copy()
# Don't write out .pyc files because in cases in which files move around or
# the PYTHONPATH / sys.path change, old .pyc files can be mistakenly used.
# This avoids the need for admin changes on the bots in this case.
env['PYTHONDONTWRITEBYTECODE'] = '1'
# Use .boto file from home-dir instead of buildbot supplied one.
if 'AWS_CREDENTIAL_FILE' in env:
del env['AWS_CREDENTIAL_FILE']
alt_boto = os.path.expanduser('~/.boto')
if os.path.exists(alt_boto):
env['BOTO_CONFIG'] = alt_boto
cwd_drive = os.path.splitdrive(os.getcwd())[0]
env['GSUTIL'] = cwd_drive + '/b/build/third_party/gsutil/gsutil'
# When running from cygwin, we sometimes want to use a native python.
# The native python will use the depot_tools version by invoking python.bat.
if pynacl.platform.IsWindows():
env['NATIVE_PYTHON'] = 'python.bat'
else:
env['NATIVE_PYTHON'] = 'python'
if sys.platform == 'win32':
# If the temp directory is not on the same drive as the working directory,
# there can be random failures when cleaning up temp directories, so use
# a directory on the current drive. Use __file__ here instead of os.getcwd()
# because toolchain_main picks its working directories relative to __file__
filedrive, _ = os.path.splitdrive(__file__)
tempdrive, _ = os.path.splitdrive(env['TEMP'])
if tempdrive != filedrive:
env['TEMP'] = filedrive + '\\temp'
env['TMP'] = env['TEMP']
if not os.path.exists(env['TEMP']):
os.mkdir(env['TEMP'])
# Run through runtest.py to get upload of perf data.
build_properties = {
'buildername': builder,
'mastername': 'client.nacl',
'buildnumber': str(build_number),
}
factory_properties = {
'perf_id': builder,
'show_perf_results': True,
'step_name': 'naclperf', # Seems unused, but is required.
'test_name': 'naclperf', # Really "Test Suite"
}
  # Locate the buildbot build directory by relative path, as its absolute
  # location varies by platform and configuration.
buildbot_build_dir = os.path.join(* [os.pardir] * 4)
runtest = os.path.join(buildbot_build_dir, 'scripts', 'slave', 'runtest.py')
# For builds with an actual build number, require that the script is present
# (i.e. that we're run from an actual buildbot).
if build_number is not None and not os.path.exists(runtest):
raise Exception('runtest.py script not found at: %s\n' % runtest)
cmd_exe = cmd.split(' ')[0]
cmd_exe_ext = os.path.splitext(cmd_exe)[1]
# Do not wrap these types of builds with runtest.py:
# - tryjobs
# - commands beginning with 'echo '
# - batch files
# - debug builders
# - builds with no perf tests
if not (slave_type == 'Trybot' or
cmd_exe == 'echo' or
cmd_exe_ext == '.bat' or
'-dbg' in builder or
HasNoPerfResults(builder)):
# Perf dashboards are now generated by output scraping that occurs in the
# script runtest.py, which lives in the buildbot repository.
# Non-trybot builds should be run through runtest, allowing it to upload
# perf data if relevant.
cmd = ' '.join([
python, runtest,
'--revision=' + build_revision,
'--build-dir=src/out',
'--results-url=https://chromeperf.appspot.com',
'--annotate=graphing',
'--no-xvfb', # We provide our own xvfb invocation.
'--factory-properties', EscapeJson(factory_properties),
'--build-properties', EscapeJson(build_properties),
cmd,
])
print "%s runs: %s\n" % (builder, cmd)
retcode = subprocess.call(cmd, env=env, shell=True)
sys.exit(retcode)
if __name__ == '__main__':
Main()
| bsd-3-clause |
googleinterns/cabby | cabby/model/datasets.py | 1 | 4391 | # coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import os
import pandas as pd
from sklearn.utils import shuffle
from cabby.geo import regions
from cabby.geo import util as gutil
class RUNDataset:
def __init__(self, data_dir: str, s2level: int, lines: bool = False):
train_ds, valid_ds, test_ds, ds = self.load_data(data_dir, lines=lines)
# Get labels.
map_1 = regions.get_region("RUN-map1")
map_2 = regions.get_region("RUN-map2")
map_3 = regions.get_region("RUN-map3")
logging.info(map_1.polygon.wkt)
logging.info(map_2.polygon.wkt)
logging.info(map_3.polygon.wkt)
unique_cellid_map_1 = gutil.cellids_from_polygon(map_1.polygon, s2level)
unique_cellid_map_2 = gutil.cellids_from_polygon(map_2.polygon, s2level)
unique_cellid_map_3 = gutil.cellids_from_polygon(map_3.polygon, s2level)
unique_cellid = (
unique_cellid_map_1 + unique_cellid_map_2 + unique_cellid_map_3)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.ds = ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
def load_data(self, data_dir: str, lines: bool):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
ds['instructions'] = ds.groupby(
['id'])['instruction'].transform(lambda x: ' '.join(x))
ds = ds.drop_duplicates(subset='id', keep="last")
columns_keep = ds.columns.difference(
['map', 'id', 'instructions', 'end_point', 'start_point'])
ds.drop(columns_keep, 1, inplace=True)
ds = shuffle(ds)
ds.reset_index(inplace=True, drop=True)
dataset_size = ds.shape[0]
logging.info(f"Size of dataset: {ds.shape[0]}")
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
return train_ds, valid_ds, test_ds, ds
class RVSDataset:
def __init__(self, data_dir: str, s2level: int, region: str, lines: bool = True):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
logging.info(f"Size of dataset before removal of duplication: {ds.shape[0]}")
ds = pd.concat([ds.drop(['geo_landmarks'], axis=1), ds['geo_landmarks'].apply(pd.Series)], axis=1)
lengths = ds.end_point.apply(lambda x: x if len(x) == 3 else "").tolist()
ds['end_osmid'] = ds.end_point.apply(lambda x: x[1])
ds['start_osmid'] = ds.start_point.apply(lambda x: x[1])
ds['end_pivot'] = ds.end_point
ds['end_point'] = ds.end_point.apply(lambda x: x[3])
ds['start_point'] = ds.start_point.apply(lambda x: x[3])
ds = ds.drop_duplicates(subset=['end_osmid', 'start_osmid'], keep='last')
logging.info(f"Size of dataset after removal of duplication: {ds.shape[0]}")
dataset_size = ds.shape[0]
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
# Get labels.
active_region = regions.get_region(region)
unique_cellid = gutil.cellids_from_polygon(active_region.polygon, s2level)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
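# Illustrative usage sketch (not part of the original module). The data
# directory paths, the S2 cell level and the region name below are
# hypothetical placeholders; the block only shows how the two dataset
# wrappers above are constructed and what they expose.
if __name__ == '__main__':
  run_dataset = RUNDataset(data_dir='/path/to/run_data', s2level=13)
  logging.info(
    f"RUN splits: {run_dataset.train.shape[0]} train / "
    f"{run_dataset.valid.shape[0]} valid / {run_dataset.test.shape[0]} test")
  rvs_dataset = RVSDataset(
    data_dir='/path/to/rvs_data', s2level=13, region='Manhattan')
  logging.info(f"RVS unique S2 cells: {len(rvs_dataset.unique_cellid)}")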
| apache-2.0 |
ywcui1990/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 17 | 6193 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def refreshGUI(self):
"""Give plot a pause, so data is drawn and GUI's event loop can run.
"""
plt.pause(0.0001)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
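# Illustrative usage sketch (not part of the original module): both output
# classes expose the same write()/close() interface, so a file writer and a
# plot writer can be swapped without changing the calling code. The gym name
# and the values below are made up for the example.
if __name__ == "__main__":
  import datetime
  output = NuPICFileOutput(["example_gym"])
  start = datetime.datetime(2010, 7, 2)
  for i in range(3):
    timestamp = start + datetime.timedelta(hours=i)
    # One timestamp/actual/prediction triple per named output stream.
    output.write([timestamp], [float(20 + i)], [float(20 + i) + 0.5])
  output.close()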
| agpl-3.0 |
deeplook/bokeh | bokeh/charts/builder/timeseries_builder.py | 26 | 6252 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class, which lets you build your TimeSeries charts by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
values (iterable): a 2d iterable containing the values. Can be anything that
can be converted to a 2d array; the x (time) axis is determined
by ``index``, while the other columns are interpreted as y values.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
# necessary to make all formats and encoders happy with array, blaze, ...
xs = list([x for x in self._values_index])
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
# save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/backends/backend_nbagg.py | 2 | 9384 | """Interactive figures in the IPython notebook"""
# Note: There is a notebook in
# lib/matplotlib/backends/web_backend/nbagg_uat.ipynb to help verify
# that changes made maintain expected behaviour.
import datetime
from base64 import b64encode
import json
import io
import os
import six
from uuid import uuid4 as uuid
import tornado.ioloop
from IPython.display import display, Javascript, HTML
try:
# Jupyter/IPython 4.x or later
from ipykernel.comm import Comm
except ImportError:
# Jupyter/IPython 3.x or earlier
from IPython.kernel.comm import Comm
from matplotlib import rcParams, is_interactive
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_webagg_core import (
FigureCanvasWebAggCore, FigureManagerWebAgg, NavigationToolbar2WebAgg,
TimerTornado)
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, NavigationToolbar2)
from matplotlib.figure import Figure
def connection_info():
"""
Return a string showing the figure and connection status for
the backend. This is intended as a diagnostic tool, and not for general
use.
"""
result = []
for manager in Gcf.get_all_fig_managers():
fig = manager.canvas.figure
result.append('{0} - {1}'.format((fig.get_label() or
"Figure {0}".format(manager.num)),
manager.web_sockets))
if not is_interactive():
result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))
return '\n'.join(result)
# Note: Version 3.2 and 4.x icons
# http://fontawesome.io/3.2.1/icons/
# http://fontawesome.io/
# the `fa fa-xxx` part targets font-awesome 4, (IPython 3.x)
# the icon-xxx targets font awesome 3.21 (IPython 2.x)
_FONT_AWESOME_CLASSES = {
'home': 'fa fa-home icon-home',
'back': 'fa fa-arrow-left icon-arrow-left',
'forward': 'fa fa-arrow-right icon-arrow-right',
'zoom_to_rect': 'fa fa-square-o icon-check-empty',
'move': 'fa fa-arrows icon-move',
'download': 'fa fa-floppy-o icon-save',
None: None
}
class NavigationIPy(NavigationToolbar2WebAgg):
# Use the standard toolbar items + download button
toolitems = [(text, tooltip_text,
_FONT_AWESOME_CLASSES[image_file], name_of_method)
for text, tooltip_text, image_file, name_of_method
in (NavigationToolbar2.toolitems +
(('Download', 'Download plot', 'download', 'download'),))
if image_file in _FONT_AWESOME_CLASSES]
class FigureManagerNbAgg(FigureManagerWebAgg):
ToolbarCls = NavigationIPy
def __init__(self, canvas, num):
self._shown = False
FigureManagerWebAgg.__init__(self, canvas, num)
def display_js(self):
# XXX How to do this just once? It has to deal with multiple
# browser instances using the same kernel (require.js - but the
# file isn't static?).
display(Javascript(FigureManagerNbAgg.get_javascript()))
def show(self):
if not self._shown:
self.display_js()
self._create_comm()
else:
self.canvas.draw_idle()
self._shown = True
def reshow(self):
"""
A special method to re-show the figure in the notebook.
"""
self._shown = False
self.show()
@property
def connected(self):
return bool(self.web_sockets)
@classmethod
def get_javascript(cls, stream=None):
if stream is None:
output = io.StringIO()
else:
output = stream
super(FigureManagerNbAgg, cls).get_javascript(stream=output)
with io.open(os.path.join(
os.path.dirname(__file__),
"web_backend",
"nbagg_mpl.js"), encoding='utf8') as fd:
output.write(fd.read())
if stream is None:
return output.getvalue()
def _create_comm(self):
comm = CommSocket(self)
self.add_web_socket(comm)
return comm
def destroy(self):
self._send_event('close')
# need to copy comms as callbacks will modify this list
for comm in list(self.web_sockets):
comm.on_close()
self.clearup_closed()
def clearup_closed(self):
"""Clear up any closed Comms."""
self.web_sockets = set([socket for socket in self.web_sockets
if socket.is_open()])
if len(self.web_sockets) == 0:
self.canvas.close_event()
def remove_comm(self, comm_id):
self.web_sockets = set([socket for socket in self.web_sockets
if not socket.comm.comm_id == comm_id])
class FigureCanvasNbAgg(FigureCanvasWebAggCore):
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
class CommSocket(object):
"""
Manages the Comm connection between IPython and the browser (client).
Comms are 2 way, with the CommSocket being able to publish a message
via the send_json method, and handle a message with on_message. On the
JS side figure.send_message and figure.ws.onmessage do the sending and
receiving respectively.
"""
def __init__(self, manager):
self.supports_binary = None
self.manager = manager
self.uuid = str(uuid())
# Publish an output area with a unique ID. The javascript can then
# hook into this area.
display(HTML("<div id=%r></div>" % self.uuid))
try:
self.comm = Comm('matplotlib', data={'id': self.uuid})
except AttributeError:
raise RuntimeError('Unable to create an IPython notebook Comm '
'instance. Are you in the IPython notebook?')
self.comm.on_msg(self.on_message)
manager = self.manager
self._ext_close = False
def _on_close(close_message):
self._ext_close = True
manager.remove_comm(close_message['content']['comm_id'])
manager.clearup_closed()
self.comm.on_close(_on_close)
def is_open(self):
return not (self._ext_close or self.comm._closed)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
if self.is_open():
try:
self.comm.close()
except KeyError:
# apparently already cleaned it up?
pass
def send_json(self, content):
self.comm.send({'data': json.dumps(content)})
def send_binary(self, blob):
# The comm is ascii, so we always send the image in base64
# encoded data URL form.
data = b64encode(blob)
if six.PY3:
data = data.decode('ascii')
data_uri = "data:image/png;base64,{0}".format(data)
self.comm.send({'data': data_uri})
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message['content']['data'])
if message['type'] == 'closing':
self.on_close()
self.manager.clearup_closed()
elif message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
self.manager.handle_json(message)
@_Backend.export
class _BackendNbAgg(_Backend):
FigureCanvas = FigureCanvasNbAgg
FigureManager = FigureManagerNbAgg
@staticmethod
def new_figure_manager_given_figure(num, figure):
canvas = FigureCanvasNbAgg(figure)
if rcParams['nbagg.transparent']:
figure.patch.set_alpha(0)
manager = FigureManagerNbAgg(canvas, num)
if is_interactive():
manager.show()
figure.canvas.draw_idle()
canvas.mpl_connect('close_event', lambda event: Gcf.destroy(num))
return manager
@staticmethod
def trigger_manager_draw(manager):
manager.show()
@staticmethod
def show():
from matplotlib._pylab_helpers import Gcf
managers = Gcf.get_all_fig_managers()
if not managers:
return
interactive = is_interactive()
for manager in managers:
manager.show()
# plt.figure adds an event which puts the figure in focus
# in the activeQue. Disable this behaviour, as it results in
# figures being put as the active figure after they have been
# shown, even in non-interactive mode.
if hasattr(manager, '_cidgcf'):
manager.canvas.mpl_disconnect(manager._cidgcf)
if not interactive and manager in Gcf._activeQue:
Gcf._activeQue.remove(manager)
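# Illustrative usage sketch (not part of the original module): inside a
# Jupyter/IPython notebook this backend is normally selected before pyplot is
# imported, after which figures render as live nbagg widgets and
# connection_info() can be used to inspect which figures hold open comms.
# Left as a comment-only sketch since it is only meaningful in a notebook.
#
#     import matplotlib
#     matplotlib.use('nbagg')
#     import matplotlib.pyplot as plt
#     from matplotlib.backends.backend_nbagg import connection_info
#     plt.plot([1, 2, 3])
#     plt.show()
#     print(connection_info())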
| gpl-3.0 |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_14_evaluation_metrics/evaluate_poi_identifier.py | 1 | 2588 | #!/usr/bin/python
"""
starter code for the evaluation mini-project
start by copying your trained/tested POI identifier
that you built in the validation mini-project
the second step toward building your POI identifier!
start by loading/formatting the data
"""
import pickle
import sys
sys.path.append("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.tree import DecisionTreeClassifier
from sklearn import cross_validation
import numpy as np
data_dict = pickle.load(open("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
features_train,features_test,labels_train,labels_test = cross_validation.train_test_split(features,labels,test_size=0.3,
random_state=42)
clf = DecisionTreeClassifier()
clf.fit(features_train,labels_train)
clf.score(features_test,labels_test)
# How many POIs are in the test set for your POI identifier?
pred = clf.predict(features_test)
sum(pred)
print len([e for e in labels_test if e == 1.0])
# How many people total are in your test set?
len(pred)
# If your identifier predicted 0. (not POI) for everyone in the test set, what would its accuracy be?
1.0 - 5.0/29
# Precision and recall can help illuminate your performance better.
# Use the precision_score and recall_score available in sklearn.metrics to compute those quantities.
# What’s the precision?
from sklearn.metrics import *
precision_score(labels_test, pred)
# What’s the recall?
recall_score(labels_test, pred)
# Here are some made-up predictions and true labels for a hypothetical test set;
# fill in the following boxes to practice identifying true positives, false positives, true negatives, and false negatives.
# Let’s use the convention that “1” signifies a positive result, and “0” a negative.
predictions = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]
true_labels = [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0]
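# Illustrative addition (not part of the original starter code): counting the
# confusion-matrix cells directly makes the precision/recall numbers below
# easy to verify by hand.
true_positives = sum(1 for t, p in zip(true_labels, predictions) if t == 1 and p == 1)
false_positives = sum(1 for t, p in zip(true_labels, predictions) if t == 0 and p == 1)
false_negatives = sum(1 for t, p in zip(true_labels, predictions) if t == 1 and p == 0)
true_negatives = sum(1 for t, p in zip(true_labels, predictions) if t == 0 and p == 0)
print "TP: %d, FP: %d, FN: %d, TN: %d" % (true_positives, false_positives, false_negatives, true_negatives)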
# What's the precision of this classifier?
precision_score(true_labels, predictions)
# What's the recall of this classifier?
recall_score(true_labels, predictions)
| mit |
Garrett-R/scikit-learn | sklearn/linear_model/randomized_l1.py | 11 | 23088 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold: float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
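# Illustrative usage sketch (not part of the original module): a minimal run
# of RandomizedLasso and lasso_stability_path on synthetic data. The data
# shapes, coefficients and random seed are arbitrary choices for the example.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    X = rng.randn(100, 10)
    # Only the first two features are informative.
    y = X[:, 0] + 2 * X[:, 1] + 0.1 * rng.randn(100)
    selector = RandomizedLasso(alpha='aic', random_state=42).fit(X, y)
    print("Stability scores per feature: %s" % selector.scores_)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42)
    print("Number of alpha grid points: %d" % len(alpha_grid))
    print("Score path shape: %s x %s" % scores_path.shape)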
| bsd-3-clause |
matplotlib/cmocean | cmocean/rgb/dense.py | 2 | 13693 |
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {'xp': [16.121891585344997, 33.901145962549492, 5.5873058066040926, -14.703203914141397, -17.875928056390336, -5.3288735306278738],
'yp': [-2.5423728813559308, -13.425925925925895, -42.422027290448327, -35.333333333333314, -8.83264462809916, -2.1686159844054487],
'min_Jp': 15.0,
'max_Jp': 95.0}
cm_data = [[ 0.21298394, 0.05589169, 0.14220951],
[ 0.21780744, 0.0570005 , 0.14665582],
[ 0.22261214, 0.05808842, 0.15115908],
[ 0.22739756, 0.05915624, 0.15572185],
[ 0.23216536, 0.06020099, 0.16034977],
[ 0.23691745, 0.06121879, 0.1650498 ],
[ 0.24164654, 0.06222163, 0.169816 ],
[ 0.24635153, 0.06321115, 0.17465056],
[ 0.25103114, 0.06418929, 0.1795555 ],
[ 0.25568737, 0.06515168, 0.1845388 ],
[ 0.26031556, 0.06610638, 0.18959733],
[ 0.26491272, 0.06705861, 0.19473015],
[ 0.26947709, 0.0680114 , 0.19993831],
[ 0.27400681, 0.06896804, 0.20522255],
[ 0.27849993, 0.06993211, 0.21058327],
[ 0.28295501, 0.07090603, 0.21602205],
[ 0.28737014, 0.0718934 , 0.22153921],
[ 0.29174204, 0.07290112, 0.22713094],
[ 0.29606871, 0.07393344, 0.23279613],
[ 0.30034822, 0.07499465, 0.23853326],
[ 0.30457867, 0.07608911, 0.24434046],
[ 0.30875826, 0.07722111, 0.25021549],
[ 0.31288529, 0.0783949 , 0.25615575],
[ 0.3169582 , 0.07961456, 0.26215837],
[ 0.32097556, 0.08088399, 0.26822019],
[ 0.32493609, 0.08220684, 0.27433782],
[ 0.3288387 , 0.08358647, 0.28050768],
[ 0.33268245, 0.08502593, 0.28672608],
[ 0.33646657, 0.08652789, 0.2929892 ],
[ 0.34019047, 0.08809468, 0.29929318],
[ 0.34385372, 0.08972821, 0.30563417],
[ 0.34745604, 0.09143006, 0.31200825],
[ 0.35099729, 0.0932014 , 0.31841152],
[ 0.35447749, 0.09504303, 0.32484029],
[ 0.35789677, 0.09695535, 0.33129096],
[ 0.36125536, 0.09893846, 0.33776007],
[ 0.36455362, 0.10099212, 0.34424427],
[ 0.36779195, 0.10311585, 0.35074041],
[ 0.37097085, 0.10530889, 0.35724546],
[ 0.37409088, 0.10757029, 0.36375657],
[ 0.37715263, 0.10989888, 0.37027108],
[ 0.38015674, 0.11229336, 0.37678646],
[ 0.38310387, 0.11475229, 0.38330035],
[ 0.38599472, 0.11727411, 0.38981058],
[ 0.38882999, 0.1198572 , 0.3963151 ],
[ 0.39161037, 0.12249987, 0.402812 ],
[ 0.3943366 , 0.12520039, 0.40929955],
[ 0.39700936, 0.12795703, 0.41577611],
[ 0.39962936, 0.13076802, 0.42224018],
[ 0.40219729, 0.13363161, 0.42869038],
[ 0.40471394, 0.13654614, 0.43512488],
[ 0.40717995, 0.13950986, 0.44154258],
[ 0.4095959 , 0.14252107, 0.44794287],
[ 0.41196239, 0.14557814, 0.45432475],
[ 0.41428002, 0.1486795 , 0.4606873 ],
[ 0.41654936, 0.15182361, 0.46702967],
[ 0.41877098, 0.15500903, 0.47335108],
[ 0.4209454 , 0.15823432, 0.4796508 ],
[ 0.42307313, 0.16149814, 0.48592814],
[ 0.42515465, 0.16479918, 0.49218247],
[ 0.42719043, 0.1681362 , 0.49841321],
[ 0.42918111, 0.17150798, 0.50461925],
[ 0.431127 , 0.17491341, 0.5108004 ],
[ 0.43302838, 0.17835141, 0.5169565 ],
[ 0.43488561, 0.18182099, 0.52308708],
[ 0.43669905, 0.18532117, 0.5291917 ],
[ 0.43846903, 0.18885105, 0.53526994],
[ 0.44019583, 0.19240976, 0.54132138],
[ 0.44187976, 0.19599648, 0.54734563],
[ 0.44352106, 0.19961045, 0.5533423 ],
[ 0.44512012, 0.2032509 , 0.55931077],
[ 0.44667705, 0.20691717, 0.56525088],
[ 0.44819199, 0.21060865, 0.57116243],
[ 0.44966511, 0.21432473, 0.57704502],
[ 0.45109659, 0.21806485, 0.58289828],
[ 0.45248658, 0.22182847, 0.58872183],
[ 0.45383521, 0.2256151 , 0.59451528],
[ 0.45514261, 0.22942427, 0.60027826],
[ 0.45640887, 0.23325554, 0.60601037],
[ 0.45763398, 0.23710854, 0.61171135],
[ 0.45881803, 0.24098289, 0.61738074],
[ 0.4599611 , 0.24487823, 0.62301809],
[ 0.46106323, 0.24879421, 0.62862296],
[ 0.46212445, 0.25273054, 0.63419487],
[ 0.46314479, 0.25668693, 0.63973335],
[ 0.46412426, 0.2606631 , 0.6452379 ],
[ 0.46506286, 0.2646588 , 0.650708 ],
[ 0.46596031, 0.26867393, 0.65614343],
[ 0.46681665, 0.27270825, 0.66154354],
[ 0.467632 , 0.27676148, 0.66690758],
[ 0.46840632, 0.28083345, 0.67223496],
[ 0.46913959, 0.28492398, 0.67752502],
[ 0.46983176, 0.28903289, 0.68277713],
[ 0.47048281, 0.29316004, 0.68799058],
[ 0.4710927 , 0.29730529, 0.69316468],
[ 0.47166137, 0.30146848, 0.69829868],
[ 0.47218867, 0.30564956, 0.70339194],
[ 0.47267406, 0.30984863, 0.70844403],
[ 0.47311806, 0.3140653 , 0.71345366],
[ 0.47352067, 0.31829946, 0.71841996],
[ 0.47388188, 0.322551 , 0.72334205],
[ 0.47420168, 0.32681981, 0.728219 ],
[ 0.47448009, 0.33110575, 0.73304987],
[ 0.47471715, 0.33540873, 0.73783366],
[ 0.4749129 , 0.33972863, 0.74256938],
[ 0.47506742, 0.34406531, 0.74725597],
[ 0.4751808 , 0.34841867, 0.75189235],
[ 0.47525316, 0.35278857, 0.75647742],
[ 0.47528466, 0.35717487, 0.76101004],
[ 0.47527514, 0.36157758, 0.76548918],
[ 0.47522479, 0.36599656, 0.76991363],
[ 0.47513427, 0.37043147, 0.77428199],
[ 0.47500393, 0.37488213, 0.77859297],
[ 0.47483412, 0.37934834, 0.7828453 ],
[ 0.4746253 , 0.38382989, 0.78703766],
[ 0.47437795, 0.38832654, 0.7911687 ],
[ 0.47409263, 0.39283807, 0.79523708],
[ 0.47376999, 0.39736419, 0.79924139],
[ 0.47341074, 0.40190463, 0.80318024],
[ 0.47301567, 0.40645908, 0.80705223],
[ 0.47258566, 0.41102721, 0.81085591],
[ 0.47212171, 0.41560865, 0.81458986],
[ 0.4716249 , 0.42020304, 0.81825263],
[ 0.47109642, 0.42480997, 0.82184277],
[ 0.47053758, 0.42942898, 0.82535887],
[ 0.4699498 , 0.43405962, 0.82879947],
[ 0.46933466, 0.43870139, 0.83216318],
[ 0.46869383, 0.44335376, 0.83544858],
[ 0.46802917, 0.44801616, 0.83865432],
[ 0.46734263, 0.45268799, 0.84177905],
[ 0.46663636, 0.45736864, 0.84482148],
[ 0.46591265, 0.46205743, 0.84778034],
[ 0.46517394, 0.46675366, 0.85065444],
[ 0.46442285, 0.47145661, 0.85344263],
[ 0.46366216, 0.4761655 , 0.85614385],
[ 0.46289481, 0.48087955, 0.85875708],
[ 0.46212297, 0.48559831, 0.8612812 ],
[ 0.4613509 , 0.49032052, 0.86371555],
[ 0.46058208, 0.49504528, 0.86605942],
[ 0.45982017, 0.49977167, 0.86831217],
[ 0.45906898, 0.50449872, 0.87047333],
[ 0.4583325 , 0.50922545, 0.87254251],
[ 0.45761487, 0.51395086, 0.87451947],
[ 0.45692037, 0.51867392, 0.87640412],
[ 0.45625342, 0.52339359, 0.87819649],
[ 0.45561856, 0.52810881, 0.87989676],
[ 0.45502044, 0.53281852, 0.88150529],
[ 0.45446291, 0.53752203, 0.8830221 ],
[ 0.45395166, 0.5422179 , 0.88444824],
[ 0.45349173, 0.54690499, 0.88578463],
[ 0.45308803, 0.55158223, 0.88703226],
[ 0.45274551, 0.55624857, 0.8881923 ],
[ 0.45246908, 0.56090297, 0.88926607],
[ 0.45226366, 0.5655444 , 0.89025507],
[ 0.45213406, 0.57017185, 0.89116092],
[ 0.45208461, 0.57478456, 0.89198505],
[ 0.45212047, 0.57938135, 0.89272981],
[ 0.45224622, 0.5839613 , 0.89339735],
[ 0.45246621, 0.58852353, 0.89398987],
[ 0.45278458, 0.59306722, 0.89450974],
[ 0.45320531, 0.59759159, 0.89495941],
[ 0.45373211, 0.60209592, 0.89534144],
[ 0.45436847, 0.60657953, 0.8956585 ],
[ 0.45511768, 0.61104174, 0.89591342],
[ 0.45598269, 0.61548199, 0.89610905],
[ 0.45696613, 0.61989976, 0.89624827],
[ 0.45807033, 0.62429458, 0.89633399],
[ 0.45929732, 0.62866605, 0.89636919],
[ 0.46064879, 0.63301382, 0.89635684],
[ 0.46212629, 0.6373375 , 0.89630027],
[ 0.46373081, 0.6416369 , 0.89620239],
[ 0.46546305, 0.64591186, 0.89606608],
[ 0.46732345, 0.65016224, 0.89589433],
[ 0.46931216, 0.65438798, 0.89569008],
[ 0.47142903, 0.65858902, 0.89545627],
[ 0.47367364, 0.66276538, 0.89519579],
[ 0.47604536, 0.66691708, 0.89491161],
[ 0.47854335, 0.67104413, 0.89460702],
[ 0.48116628, 0.67514678, 0.89428415],
[ 0.48391278, 0.67922522, 0.89394566],
[ 0.48678129, 0.68327963, 0.89359417],
[ 0.48977007, 0.68731025, 0.89323218],
[ 0.4928772 , 0.69131735, 0.89286215],
[ 0.49610063, 0.69530122, 0.89248647],
[ 0.49943822, 0.69926217, 0.89210744],
[ 0.50288765, 0.70320047, 0.89172772],
[ 0.50644655, 0.70711649, 0.89134936],
[ 0.51011248, 0.71101066, 0.8909741 ],
[ 0.51388294, 0.71488334, 0.89060393],
[ 0.51775541, 0.71873493, 0.89024078],
[ 0.52172732, 0.72256583, 0.8898865 ],
[ 0.5257961 , 0.72637645, 0.88954287],
[ 0.52995915, 0.7301672 , 0.8892116 ],
[ 0.53421391, 0.7339385 , 0.88889434],
[ 0.5385578 , 0.73769077, 0.88859267],
[ 0.5429883 , 0.74142444, 0.88830811],
[ 0.54750281, 0.74513991, 0.88804246],
[ 0.5520989 , 0.74883762, 0.88779685],
[ 0.55677422, 0.75251799, 0.88757251],
[ 0.56152638, 0.75618144, 0.88737072],
[ 0.56635309, 0.75982839, 0.88719273],
[ 0.57125208, 0.76345922, 0.88703974],
[ 0.57622118, 0.76707435, 0.8869129 ],
[ 0.58125826, 0.77067417, 0.88681333],
[ 0.58636126, 0.77425906, 0.88674212],
[ 0.59152819, 0.7778294 , 0.88670031],
[ 0.59675713, 0.78138555, 0.88668891],
[ 0.60204624, 0.78492789, 0.88670892],
[ 0.60739371, 0.78845676, 0.88676131],
[ 0.61279785, 0.79197249, 0.886847 ],
[ 0.61825699, 0.79547544, 0.88696697],
[ 0.62376953, 0.79896592, 0.88712212],
[ 0.62933401, 0.80244424, 0.88731328],
[ 0.63494897, 0.80591071, 0.88754133],
[ 0.64061303, 0.80936562, 0.88780715],
[ 0.64632485, 0.81280925, 0.88811162],
[ 0.65208315, 0.81624189, 0.88845562],
[ 0.65788673, 0.81966379, 0.88884001],
[ 0.6637344 , 0.82307522, 0.88926568],
[ 0.66962506, 0.82647642, 0.88973352],
[ 0.67555762, 0.82986764, 0.89024441],
[ 0.68153106, 0.83324911, 0.89079928],
[ 0.68754438, 0.83662105, 0.89139904],
[ 0.69359663, 0.83998369, 0.89204464],
[ 0.69968688, 0.84333724, 0.89273702],
[ 0.70581423, 0.84668191, 0.89347718],
[ 0.71197782, 0.85001791, 0.8942661 ],
[ 0.7181769 , 0.85334541, 0.89510469],
[ 0.72441053, 0.85666464, 0.89599414],
[ 0.73067788, 0.8599758 , 0.89693553],
[ 0.73697811, 0.8632791 , 0.89793 ],
[ 0.74331039, 0.86657473, 0.89897869],
[ 0.74967389, 0.86986292, 0.90008279],
[ 0.75606778, 0.87314387, 0.90124351],
[ 0.76249117, 0.87641781, 0.90246212],
[ 0.7689432 , 0.87968498, 0.90373988],
[ 0.77542295, 0.88294564, 0.9050781 ],
[ 0.78192947, 0.88620003, 0.90647814],
[ 0.78846179, 0.88944845, 0.90794134],
[ 0.79501887, 0.89269119, 0.9094691 ],
[ 0.80159965, 0.89592859, 0.91106281],
[ 0.80820295, 0.899161 , 0.91272391],
[ 0.81482754, 0.90238881, 0.91445386],
[ 0.82147215, 0.90561245, 0.91625407],
[ 0.82813543, 0.90883237, 0.91812595],
[ 0.83481598, 0.91204906, 0.92007088],
[ 0.84151229, 0.91526306, 0.92209023],
[ 0.84822279, 0.91847494, 0.92418529],
[ 0.85494584, 0.92168533, 0.92635732],
[ 0.8616797 , 0.9248949 , 0.92860749],
[ 0.86842255, 0.92810438, 0.9309369 ],
[ 0.87517248, 0.93131455, 0.93334654],
[ 0.88192751, 0.93452625, 0.93583728],
[ 0.88868558, 0.93774038, 0.93840987],
[ 0.89544454, 0.94095789, 0.94106488],
[ 0.90220216, 0.9441798 , 0.94380273]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| mit |
ctozlm/Dato-Core | src/unity/python/graphlab/data_structures/sframe.py | 13 | 196438 | """
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
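# Illustrative usage sketch (not part of the original module): an SFrame is
# typically built from a dictionary of columns or read from a CSV file, and
# then filtered and manipulated much like a pandas.DataFrame. The CSV path is
# a placeholder.
#
#     import graphlab
#     sf = graphlab.SFrame({'id': [1, 2, 3], 'value': [0.1, 0.2, 0.3]})
#     sf2 = graphlab.SFrame.read_csv('data.csv')  # 'data.csv' is a placeholder
#     print sf[sf['value'] > 0.15]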
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import infer_type_of_list
from graphlab.cython.context import debug_trace as cython_context
from graphlab.cython.cy_sframe import UnitySFrameProxy
from graphlab.util import _check_canvas_enabled, _make_internal_url, _is_callable
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import graphlab.aggregate
import graphlab
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import inspect
from graphlab.deps import pandas, HAS_PANDAS
import time
import itertools
import os
import subprocess
import uuid
import platform
__all__ = ['SFrame']
SFRAME_GARBAGE_COLLECTOR = []
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
'You can use len(sf) to force materialization.']
SFRAME_ROOTS = [# Binary/lib location in production egg
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')),
# Build tree location of SFrame binaries
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'sframe')),
# Location of python sources
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'unity', 'python', 'graphlab')),
# Build tree dependency location
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
]
RDD_SFRAME_PICKLE = "rddtosf_pickle"
RDD_SFRAME_NONPICKLE = "rddtosf_nonpickle"
SFRAME_RDD_PICKLE = "sftordd_pickle"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "graphlab-create-spark-integration.jar"
SYS_UTIL_PY = "sys_util.py"
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
YARN_OS = None
SPARK_SUPPORT_NAMES = {'RDD_SFRAME_PATH':'rddtosf_pickle',
'RDD_SFRAME_NONPICKLE_PATH':'rddtosf_nonpickle',
'SFRAME_RDD_PATH':'sftordd_pickle',
'HDFS_LIB_PATH':'libhdfs.so',
'RDD_JAR_PATH':'graphlab-create-spark-integration.jar',
'SYS_UTIL_PY_PATH':'sys_util.py',
'SPARK_PIPE_WRAPPER_PATH':'spark_pipe_wrapper'}
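# Scan the candidate roots above for the Spark support binaries. If every
# binary is located within the first (production egg) root, treat this as a
# production install; if any binary cannot be found in any root, RDD support
# is disabled below.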
first = True
for i in SFRAME_ROOTS:
for key,val in SPARK_SUPPORT_NAMES.iteritems():
tmp_path = os.path.join(i, val)
if key not in BINARY_PATHS and os.path.isfile(tmp_path):
BINARY_PATHS[key] = tmp_path
if all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
if first:
PRODUCTION_RUN = True
break
first = False
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
RDD_SUPPORT = False
def get_spark_integration_jar_path():
"""
The absolute path of the jar file required to enable GraphLab Create's
integration with Apache Spark.
"""
if 'RDD_JAR_PATH' not in BINARY_PATHS:
raise RuntimeError("Could not find a spark integration jar. "\
"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
return BINARY_PATHS['RDD_JAR_PATH']
def __rdd_support_init__(sprk_ctx):
global YARN_OS
global RDD_SUPPORT_INITED
global STAGING_DIR
global BINARY_PATHS
if not RDD_SUPPORT or RDD_SUPPORT_INITED:
return
# Make sure our GraphLabUtil scala functions are accessible from the driver
try:
tmp = sprk_ctx._jvm.org.graphlab.create.GraphLabUtil.EscapeString(sprk_ctx._jvm.java.lang.String("1,2,3,4"))
except:
raise RuntimeError("Could not execute RDD translation functions. "\
"Please make sure you have started Spark "\
"(either with spark-submit or pyspark) with the following flag set:\n"\
"'--driver-class-path " + BINARY_PATHS['RDD_JAR_PATH']+"'\n"\
"OR set the property spark.driver.extraClassPath in spark-defaults.conf")
dummy_rdd = sprk_ctx.parallelize([1])
if PRODUCTION_RUN and sprk_ctx.master == 'yarn-client':
# Get cluster operating system
os_rdd = dummy_rdd.map(lambda x: platform.system())
YARN_OS = os_rdd.collect()[0]
# Set binary path
for i in BINARY_PATHS.keys():
s = BINARY_PATHS[i]
if os.path.basename(s) == SPARK_SUPPORT_NAMES['SYS_UTIL_PY_PATH']:
continue
if YARN_OS == 'Linux':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'linux', os.path.basename(s))
elif YARN_OS == 'Darwin':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'osx', os.path.basename(s))
else:
raise RuntimeError("YARN cluster has unsupported operating system "\
"(something other than Linux or Mac OS X). "\
"Cannot convert RDDs on this cluster to SFrame.")
# Create staging directory
staging_dir = '.graphlabStaging'
if sprk_ctx.master == 'yarn-client':
tmp_loc = None
# Get that staging directory's full name
tmp_loc = dummy_rdd.map(
lambda x: subprocess.check_output(
["hdfs", "getconf", "-confKey", "fs.defaultFS"]).rstrip()).collect()[0]
STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
if STAGING_DIR is None:
raise RuntimeError("Failed to create a staging directory on HDFS. "\
"Do your cluster nodes have a working hdfs client?")
# Actually create the staging dir
unity = glconnect.get_unity()
unity.__mkdir__(STAGING_DIR)
unity.__chmod__(STAGING_DIR, 0777)
elif sprk_ctx.master[0:5] == 'local':
# Save the output sframes to the same temp workspace this engine is
# using
#TODO: Consider cases where server and client aren't on the same machine
unity = glconnect.get_unity()
STAGING_DIR = unity.get_current_cache_file_location()
if STAGING_DIR is None:
raise RuntimeError("Could not retrieve local staging directory! \
Please contact us on http://forum.dato.com.")
else:
raise RuntimeError("Your spark context's master is '" +
str(sprk_ctx.master) +
"'. Only 'local' and 'yarn-client' are supported.")
if sprk_ctx.master == 'yarn-client':
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_PATH'])
sprk_ctx.addFile(BINARY_PATHS['HDFS_LIB_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SFRAME_RDD_PATH'])
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SYS_UTIL_PY_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'])
sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
RDD_SUPPORT_INITED = True
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = graphlab.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://dato.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SFrame cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. A similar restriction applies to
:py:class:`graphlab.SGraph` and :py:class:`graphlab.SArray`.
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe and transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
    The :py:func:`graphlab.SFrame.read_csv()` function is quite powerful and can
    be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
    A delimiter other than "," can be specified, for instance the space ' '
    in this case. Only single-character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
    | bill | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
    Furthermore, thanks to the dictionary and list types, ``read_csv`` can
    handle parsing of JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
    The list and dictionary parsers are quite flexible and can absorb a
    variety of poorly formatted inputs. Also, note that the list and dictionary
    types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
    {space:1 separated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
    | {'separated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
    **Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
    >>> sf = sf[['val','id']]
    >>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
    Such operations may not be fast on large datasets, so looping over an SFrame
    should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
    The result is a new SFrame which contains only the rows of the SFrame whose
    matching row in the binary_filter is non-zero.
    This permits boolean operators to be used to perform
    logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['shape', '__proxy__', '_proxy']
def __init__(self, data=None,
format='auto',
_proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
tracker = _mt._get_metric_tracker()
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
_format = None
if (format == 'auto'):
if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
_format = 'dataframe'
tracker.track('sframe.location.memory', value=1)
elif (isinstance(data, str) or isinstance(data, unicode)):
if data.find('://') == -1:
suffix = 'local'
else:
suffix = data.split('://')[0]
tracker.track(('sframe.location.%s' % (suffix)), value=1)
if data.endswith(('.csv', '.csv.gz')):
_format = 'csv'
elif data.endswith(('.tsv', '.tsv.gz')):
_format = 'tsv'
elif data.endswith(('.txt', '.txt.gz')):
print "Assuming file is csv. For other delimiters, " + \
"please use `SFrame.read_csv`."
_format = 'csv'
else:
_format = 'sframe'
elif type(data) == SArray:
_format = 'sarray'
elif isinstance(data, SFrame):
_format = 'sframe_obj'
elif (hasattr(data, 'iteritems')):
_format = 'dict'
tracker.track('sframe.location.memory', value=1)
elif hasattr(data, '__iter__'):
_format = 'array'
tracker.track('sframe.location.memory', value=1)
elif data is None:
_format = 'empty'
else:
raise ValueError('Cannot infer input type for data ' + str(data))
else:
_format = format
tracker.track(('sframe.format.%s' % _format), value=1)
with cython_context():
if (_format == 'dataframe'):
self.__proxy__.load_from_dataframe(data)
elif (_format == 'sframe_obj'):
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif (_format == 'sarray'):
self.__proxy__.add_column(data.__proxy__, "")
elif (_format == 'array'):
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr)
elif SArray in unique_types:
raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
else:
self.__proxy__.add_column(SArray(data).__proxy__, "")
elif (_format == 'dict'):
for key,val in iter(sorted(data.iteritems())):
if (type(val) == SArray):
self.__proxy__.add_column(val.__proxy__, key)
else:
self.__proxy__.add_column(SArray(val).__proxy__, key)
elif (_format == 'csv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'tsv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'sframe'):
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif (_format == 'empty'):
pass
else:
raise ValueError('Unknown input type: ' + format)
sframe_size = -1
if self.__has_size__():
sframe_size = self.num_rows()
tracker.track('sframe.row.size', value=sframe_size)
tracker.track('sframe.col.size', value=self.num_cols())
@staticmethod
def _infer_column_types_from_lines(first_rows):
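        """
        Given an SFrame containing the first rows of a CSV file, infer a type
        for each column by looking at the Python type of every parsed value.
        Types are combined across rows: int and float combine to float,
        array.array and list combine to list, NoneType defers to the other
        observed type, and anything still ambiguous falls back to str.
        """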
if (len(first_rows.column_names()) < 1):
print "Insufficient number of columns to perform type inference"
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print "Insufficient number of rows to perform type inference"
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [list(first_rows[col])
for col in first_rows.column_names()]
# transpose
all_column_values = [list(x) for x in zip(*all_column_values_transposed)]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
# if every line was inferred to have a different number of elements, die
if len(set(len(x) for x in all_column_type_hints)) != 1:
print "Unable to infer column types. Defaulting to str"
return str
import types
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
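        # e.g. a column observed as [int, float, int] resolves to float, and a
        # column observed as [array.array, list] resolves to list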
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if (len(d) == 1):
# easy case. both agree on the type
continue
if ((int in d) and (float in d)):
# one is an int, one is a float. its a float
column_type_hints[j] = float
elif ((array.array in d) and (list in d)):
# one is an array , one is a list. its a list
column_type_hints[j] = list
elif types.NoneType in d:
# one is a NoneType. assign to other type
if currow[j] != types.NoneType:
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
        # final pass: everything which is still NoneType is now a str
for i in range(len(column_type_hints)):
if column_type_hints[i] == types.NoneType:
column_type_hints[i] = str
return column_type_hints
@classmethod
def _read_csv_impl(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True,
store_errors=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and optionally
(if store_errors=True) a dict of filenames to SArrays
        indicating, for each file, which lines could not be parsed
        correctly.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = escape_char
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
        if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy(glconnect.get_client())
internal_url = _make_internal_url(url)
if (not verbose):
glconnect.get_client().set_log_progress(False)
# Attempt to automatically detect the column types. Either produce a
# list of types; otherwise default to all str types.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first 100 rows (using all the desired arguments).
first_rows = graphlab.SFrame.read_csv(url, nrows=100,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values = na_values)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
print "------------------------------------------------------"
print "Inferred types from first line of file as "
print "column_type_hints="+ typelist
print "If parsing fails due to incorrect types, you can correct"
print "the inferred type list above and pass it to read_csv in"
print "the column_type_hints argument"
print "------------------------------------------------------"
column_type_inference_was_used = True
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
print 'Could not detect types. Using str for each column.'
if type(column_type_hints) is type:
type_hints = {'__all_columns__': column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))
elif type(column_type_hints) is dict:
type_hints = column_type_hints
else:
raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
_mt._get_metric_tracker().track('sframe.csv.parse')
suffix=''
if url.find('://') == -1:
suffix = 'local'
else:
suffix = url.split('://')[0]
_mt._get_metric_tracker().track(('sframe.location.%s' % (suffix)), value=1)
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
if column_type_inference_was_used:
# try again
print "Unable to parse the file with automatic type inference."
print "Defaulting to column_type_hints=str"
type_hints = {'__all_columns__': str}
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except:
raise
else:
raise
glconnect.get_client().set_log_progress(True)
return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.iteritems() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
        indicating, for each file, which lines could not be parsed
        correctly.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
            * If a list of types is provided, the types apply
              to each column in order, e.g. [int, float, str]
              will parse the first column as int, the second as float and the
              third as string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
            is a dictionary of filenames to SArrays indicating, for each file,
            which lines could not be parsed correctly.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=True)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
            default column names: 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
            continue parsing, skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
            * If a list of types is provided, the types apply
              to each column in order, e.g. [int, float, str]
              will parse the first column as int, the second as float and the
              third as string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
        Do not treat the first line as a header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
        Treat '3' as a missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=False)[0]
def to_schema_rdd(self,sc,sql,number_of_partitions=4):
"""
Convert the current SFrame to the Spark SchemaRDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
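        For example, assuming the jar lives at the placeholder path
        ``/path/to/graphlab-create-spark-integration.jar`` (use the value
        returned by `graphlab.get_spark_integration_jar_path`), option 2 might
        look like::

            pyspark --driver-class-path /path/to/graphlab-create-spark-integration.jar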
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
sql : SQLContext
sql is an existing SQLContext.
number_of_partitions : int
number of partitions for the output rdd
Returns
----------
out: SchemaRDD
Examples
--------
>>> from pyspark import SparkContext, SQLContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sqlc = SQLContext(sc)
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_schema_rdd(sc, sqlc)
>>> rdd.collect()
[Row(x=1, y=u'fish'), Row(x=2, y=u'chips'), Row(x=3, y=u'salad')]
"""
def homogeneous_type(seq):
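            # returns True when seq is empty/None or every element shares one type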
if seq is None or len(seq) == 0:
return True
iseq = iter(seq)
first_type = type(next(iseq))
            return all(type(x) is first_type for x in iseq)
if len(self) == 0:
raise ValueError("SFrame is empty")
column_names = self.column_names()
first_row = self.head(1)[0]
for name in column_names:
if hasattr(first_row[name],'__iter__') and homogeneous_type(first_row[name]) is not True:
raise TypeError("Support for translation to Spark SchemaRDD not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)
for _type in self.column_types():
if(_type.__name__ == 'datetime'):
raise TypeError("Support for translation to Spark SchemaRDD not enabled for datetime type. Use SFrame.to_rdd() ")
        rdd = self.to_rdd(sc, number_of_partitions)
from pyspark.sql import Row
rowRdd = rdd.map(lambda x: Row(**x))
return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
"""
Convert the current SFrame to the Spark RDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
number_of_partitions: int
number of partitions for the output rdd
Returns
----------
out: RDD
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_rdd(sc)
>>> rdd.collect()
[{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
"""
_mt._get_metric_tracker().track('sframe.to_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
for _type in self.column_types():
if(_type.__name__ == 'Image'):
raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
if type(number_of_partitions) is not int:
raise ValueError("number_of_partitions parameter expects an integer type")
if number_of_partitions == 0:
raise ValueError("number_of_partitions can not be initialized to zero")
# Save SFrame in a temporary place
tmp_loc = self.__get_staging_dir__(sc)
sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
self.save(sf_loc)
        # Keep track of the temporary sframe that was just saved. We need to delete it eventually.
dummysf = load_sframe(sf_loc)
dummysf.__proxy__.delete_on_close()
SFRAME_GARBAGE_COLLECTOR.append(dummysf)
sframe_len = self.__len__()
small_partition_size = sframe_len/number_of_partitions
big_partition_size = small_partition_size + 1
num_big_partition_size = sframe_len % number_of_partitions
num_small_partition_size = number_of_partitions - num_big_partition_size
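        # e.g. 10 rows over 4 partitions gives row-range sizes [3, 3, 2, 2]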
count = 0
start_index = 0
ranges = []
while(count < number_of_partitions):
if(count < num_big_partition_size):
ranges.append((str(start_index)+":"+str(start_index + big_partition_size)))
start_index = start_index + big_partition_size
else:
ranges.append((str(start_index)+":"+str(start_index + small_partition_size)))
start_index = start_index + small_partition_size
count+=1
from pyspark import RDD
rdd = sc.parallelize(ranges,number_of_partitions)
if sc.master[0:5] == 'local':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + \
" " + BINARY_PATHS['SFRAME_RDD_PATH'] + " " + sf_loc)
elif sc.master == 'yarn-client':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] + \
" " + "./" + SPARK_SUPPORT_NAMES['SFRAME_RDD_PATH'] + \
" " + sf_loc)
serializedRdd = sc._jvm.org.graphlab.create.GraphLabUtil.stringToByte(pipeRdd)
import pyspark
output_rdd = RDD(serializedRdd,sc,pyspark.serializers.PickleSerializer())
return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc):
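        """
        Initialize RDD support for the given SparkContext if that has not
        happened yet, then return the staging directory used for passing
        SFrame files between GraphLab Create and Spark.
        """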
if not RDD_SUPPORT_INITED:
__rdd_support_init__(cur_sc)
return STAGING_DIR
@classmethod
def from_rdd(cls, rdd):
"""
Convert a Spark RDD into a GraphLab Create SFrame.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
rdd : pyspark.rdd.RDD
Returns
-------
out : SFrame
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> rdd = sc.parallelize([1,2,3])
>>> sf = SFrame.from_rdd(rdd)
>>> sf
Data:
+-----+
| X1 |
+-----+
| 1.0 |
| 2.0 |
| 3.0 |
+-----+
[3 rows x 1 columns]
"""
_mt._get_metric_tracker().track('sframe.from_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
        checkRes = rdd.take(1)
if len(checkRes) > 0 and checkRes[0].__class__.__name__ == 'Row' and rdd.__class__.__name__ not in {'SchemaRDD','DataFrame'}:
raise Exception("Conversion from RDD(pyspark.sql.Row) to SFrame not supported. Please call inferSchema(RDD) first.")
if(rdd._jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
return SFrame.__from_UTF8Deserialized_rdd__(rdd)
sf_names = None
rdd_type = "rdd"
if rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}:
rdd_type = "schemardd"
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
else:
sf_names = first_row.__FIELDS__
sf_names = [str(i) for i in sf_names]
cur_sc = rdd.ctx
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
mode = "batch"
if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
mode = "pickle"
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " + \
BINARY_PATHS['RDD_SFRAME_PATH'] + " " + tmp_loc +\
" " + mode + " " + rdd_type)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" + SPARK_SUPPORT_NAMES['RDD_SFRAME_PATH'] + " " +\
tmp_loc + " " + mode + " " + rdd_type)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf_coltypes = out_sf.column_types()
if(len(out_sf_coltypes) != 0):
sf_coltypes = sf.column_types()
sf_temp_names = sf.column_names()
out_sf_temp_names = out_sf.column_names()
for i in range(len(sf_coltypes)):
if sf_coltypes[i] != out_sf_coltypes[i]:
print "mismatch for types %s and %s" % (sf_coltypes[i],out_sf_coltypes[i])
sf[sf_temp_names[i]] = sf[sf_temp_names[i]].astype(str)
out_sf[out_sf_temp_names[i]] = out_sf[out_sf_temp_names[i]].astype(str)
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def __from_UTF8Deserialized_rdd__(cls, rdd):
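        """
        Build an SFrame from an RDD (or SchemaRDD/DataFrame) whose Python-side
        deserializer is a UTF8Deserializer, by piping its string values through
        the non-pickle RDD-to-SFrame binary. For SchemaRDDs, column names and
        types (int, str, and float only) are taken from the first row.
        """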
_mt._get_metric_tracker().track('sframe.__from_UTF8Deserialized_rdd__')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
cur_sc = rdd.ctx
sf_names = None
sf_types = None
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
if(rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}):
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
sf_types = [type(i) for i in first_row.values()]
else:
sf_names = first_row.__FIELDS__
sf_types = [type(i) for i in first_row]
sf_names = [str(i) for i in sf_names]
for _type in sf_types:
if(_type != int and _type != str and _type != float and _type != unicode):
raise TypeError("Only int, str, and float are supported for now")
types = ""
for i in sf_types:
types += i.__name__ + ","
if cur_sc.master[0:5] == 'local':
t = rdd._jschema_rdd.toJavaStringOfValues().pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " + tmp_loc +\
" " + types)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.toJavaStringOfValues(
rdd._jschema_rdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc + " " + types)
else:
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
"""
Convert a table or query from a database to an SFrame.
This function does not do any checking on the given SQL query, and
cannot know what effect it will have on the database. Any side effects
from the query will be reflected on the database. If no result
rows are returned, an empty SFrame is created.
        Keep in mind the case (upper or lower) in which your database stores
        table names by default. In some cases, you may need to add quotation
        marks (or whatever character your database uses to quote identifiers),
        especially if you created the table using `to_odbc`.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
sql : str
A SQL query. The query must be acceptable by the ODBC driver used by
`graphlab.extensions._odbc_connection.unity_odbc_connection`.
Returns
-------
out : SFrame
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
does not apply to the machine your database is running, which can (and
often will) be running on a separate machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")
>>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
"""
result = db.execute_query(sql)
if not isinstance(result, SFrame):
raise RuntimeError("Cannot create an SFrame for query. No result set.")
        return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
"""
Convert an SFrame to a table in a database.
By default, searches for a table in the database with the given name.
If found, this will attempt to append all the rows of the SFrame to the
end of the table. If not, this will create a new table with the given
name. This behavior is toggled with the `append_if_exists` flag.
When creating a new table, GraphLab Create uses a heuristic approach to
pick a corresponding type for each column in the SFrame using the type
information supplied by the database's ODBC driver. Your driver must
support giving this type information for GraphLab Create to support
writing to the database.
To allow more expressive and accurate naming, `to_odbc` puts quotes
around each identifier (table names and column names). Depending on
your database, you may need to refer to the created table with quote
characters around the name. This character is not the same for all
databases, but '"' is the most common.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
table_name : str
The name of the table you would like to create/append to.
append_if_exists : bool
If True, this will attempt to append to the table named `table_name`
if it is found to exist in the database.
verbose : bool
Print progress updates on the insertion process.
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
"local machine" rule does not apply to the machine your database is
running on, which can (and often will) be running on a separate
machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})
>>> sf.to_odbc(db, 'a_cool_table')
"""
if (not verbose):
glconnect.get_client().set_log_progress(False)
db._insert_sframe(self, table_name, append_if_exists)
if (not verbose):
glconnect.get_client().set_log_progress(True)
def __repr__(self):
"""
Returns a string description of the frame
"""
printed_sf = self._imagecols_to_stringcols()
ret = self.__get_column_description__()
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
if (len(printed_sf.head()) > 0):
ret = ret + str(self)
else:
ret = ret + "\t[]"
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
max_column_width=30, max_columns=20,
max_rows_to_display=60):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
max_row_width : int, optional
            Max number of characters per printed table row (the overall table width).
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype() is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if (type(value) is array.array):
return str(list(value))
elif (type(value) is list):
return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
else:
return str(value)
def _escape_space(s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def _truncate_respect_unicode(s, max_length):
if (len(s) <= max_length):
return s
else:
u = unicode(s, 'utf-8', errors='replace')
return u[:max_length].encode('utf-8')
def _truncate_str(s, wrap_str=False):
"""
            Truncate and optionally wrap the input string as unicode, and replace
            any unconvertible characters with the unicode replacement character.
"""
s = _escape_space(s)
if len(s) <= max_column_width:
return unicode(s, 'utf-8', errors='replace')
else:
ret = ''
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
ret = wrapped_lines[0] + '\n' + last_line + ' ...'
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
return unicode(ret, 'utf-8', errors='replace')
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
# let's build a list of tables with max_columns
# each table should satisfy, max_row_width, and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
else:
col_width = max_column_width
if (table_width + col_width < max_row_width):
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
table_width = str(tbl).find('\n')
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = 'c'
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_cols() > max_columns:
row_of_tables[-1].add_column('...', ['...'] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
See Also
--------
head, tail
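Examples
--------
Illustrative only; the SFrame below is constructed inline and the exact
table layout depends on the column widths:
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.print_rows(num_rows=2)  # prints the first 2 rows and a '[3 rows x 2 columns]' footer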
"""
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == graphlab.Image]
#If there are image-type columns, copy the SFrame and cast the top MAX_NUM_ROWS_TO_DISPLAY of those columns to string
if len(image_column_names) > 0:
printed_sf = SFrame()
for t in names:
if t in image_column_names:
printed_sf[t] = self[t]._head_str(num_rows)
else:
printed_sf[t] = self[t].head(num_rows)
else:
printed_sf = self
return printed_sf
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first `num_rows` rows of the frame
(10 by default), along with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if (not footer):
return '\n'.join([str(tb) for tb in row_of_tables])
if self.__has_size__():
footer = '[%d rows x %d columns]\n' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '\n'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]\n' % self.num_columns()
footer += '\n'.join(LAZY_FOOTER_STRS)
return '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True, max_row_width=120, max_columns=40, max_column_width=25, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if self.__has_size__():
footer = '[%d rows x %d columns]<br/>' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '<br/>'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]<br/>' % self.num_columns()
footer += '<br/>'.join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = '\n</div>'
return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
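Supports the standard ``copy`` protocol (illustrative):
>>> import copy
>>> sf2 = copy.copy(sf)  # shallow copy selecting all current columns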
"""
return self.select_columns(self.column_names())
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
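Illustrative sketch (assumes SArray supports element-wise comparison,
which is what powers expressions like ``sf[sf['id'] > 1]``):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf._row_selector(sf['id'] > 1)  # keeps only the rows where the mask is non-zero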
"""
if type(other) is SArray:
if len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
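Examples
--------
Illustrative; the order of the returned types matches `column_names`:
>>> sf = graphlab.SFrame({'id': [1, 2], 'val': ['A', 'B']})
>>> sf.dtype()  # e.g. [int, str], one entry per column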
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
"""
return self.__proxy__.num_rows()
def num_cols(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_columns, num_rows
"""
return self.__proxy__.num_columns()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_cols, num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
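Examples
--------
Illustrative (the order of the names may differ from the dict literal):
>>> sf = graphlab.SFrame({'user_id': [1, 2], 'zipcode': [98101, 98102]})
>>> sf.column_names()  # e.g. ['user_id', 'zipcode']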
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
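Examples
--------
A minimal illustration with inline data:
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.head(2)  # a new SFrame containing only the rows with id 1 and 2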
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
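Examples
--------
Minimal sketch (assumes pandas is installed, which this method requires):
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> df = sf.to_dataframe()
>>> df.shape
(3, 2)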
"""
assert HAS_PANDAS
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
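Examples
--------
A minimal illustration mirroring `head`:
>>> sf = graphlab.SFrame({'id': range(100)})
>>> sf.tail(3)  # a new SFrame containing the rows with id 97, 98 and 99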
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None, seed=None):
"""
Transform each row to an :class:`~graphlab.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
is the result of `fn(x)`, with `x` a single row of the SFrame
represented as a dictionary. The ``fn`` should return
exactly one value which can be cast into type ``dtype``. If ``dtype`` is
not specified, the first 100 rows of the SFrame are used to make a guess
of the target data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
The dtype of the new SArray. If None, the first 100
elements of the array are used to guess the target
data type.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
double mean(const std::map<flexible_type, flexible_type>& dict) {
double sum = 0.0;
for (const auto& kv: dict) sum += (double)kv.second;
return sum / dict.size();
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(mean, "row");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
... 'x2': [3, 1, 2]})
>>> sf.apply(example.mean)
dtype: float
Rows: 3
[2.0,2.0,2.0]
"""
assert _is_callable(fn), "Input must be a function"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype()
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.apply')
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All outputted rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.flat_map')
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError("Could not infer output column types from the first ten rows of the SFrame. Please use the 'column_types' parameter to set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
"""
Sample the current SFrame's rows.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch. Must be between 0 and 1.
The number of rows returned is approximately the fraction times the
number of rows.
seed : int, optional
Seed for the random number generator used to sample.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if not seed:
seed = int(time.time())
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
_mt._get_metric_tracker().track('sframe.sample')
if (self.num_rows() == 0 or self.num_cols() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch for the first returned
SFrame. Must be between 0 and 1.
seed : int, optional
Seed for the random number generator used to split.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = graphlab.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print len(sf_train), len(sf_test)
922 102
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return (SFrame(), SFrame())
if not seed:
seed = int(time.time())
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
_mt._get_metric_tracker().track('sframe.random_split')
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed)
return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1]))
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is sorted
by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = graphlab.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
_mt._get_metric_tracker().track('sframe.topk')
sf = self[self[column_name].topk_index(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, the format is inferred from the filename: if the filename ends
with '.csv' or '.csv.gz', the SFrame is saved in 'csv' format,
otherwise it is saved in 'binary' format.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
_mt._get_metric_tracker().track('sframe.save', properties={'format':format})
if format == None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
else:
raise ValueError("Unsupported format: {}".format(format))
def select_column(self, key):
"""
Get a reference to the :class:`~graphlab.SArray` that corresponds with
the given key. Throws an exception if the key is something other than a
string or if the key is not found.
Parameters
----------
key : str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``key``.
See Also
--------
select_columns
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(key, str):
raise TypeError("Invalid key type: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(key))
def select_columns(self, keylist):
"""
Get SFrame composed only of the columns referred to in the given list of
keys. Throws an exception if ANY of the keys are not in this SFrame or
if ``keylist`` is anything other than a list of strings.
Parameters
----------
keylist : list[str]
The list of column names.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``keylist`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not hasattr(keylist, '__iter__'):
raise TypeError("keylist must be an iterable")
if not all([isinstance(x, str) for x in keylist]):
raise TypeError("Invalid key type: must be str")
key_set = set(keylist)
if (len(key_set)) != len(keylist):
for key in key_set:
if keylist.count(key) > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(keylist))
def add_column(self, data, name=""):
"""
Add a column to this SFrame. The number of elements in the data given
must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self. If no
name is given, a default name is chosen.
Parameters
----------
data : SArray
The 'column' of data to add.
name : string, optional
The name of the column. If no name is given, a default name is
chosen.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = graphlab.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalant to `sf['species'] = sa`
>>> sf.add_column(sa, name='species')
>>> sf
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
with cython_context():
self.__proxy__.add_column(data.__proxy__, name)
return self
def add_columns(self, data, namelist=None):
"""
Adds multiple columns to this SFrame. The number of elements in all
columns must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
namelist : list of string, optional
A list of column names. All names must be specified. ``namelist`` is
ignored if data is an SFrame.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = graphlab.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> sf.add_columns(sf2)
>>> sf
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
namelist = other.column_names()
my_columns = set(self.column_names())
for name in namelist:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list : must all be str")
with cython_context():
self.__proxy__.add_columns([x.__proxy__ for x in datalist], namelist)
return self
def remove_column(self, name):
"""
Remove a column from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
name : string
The name of the column to remove.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> sf.remove_column('val')
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
colid = self.column_names().index(name)
with cython_context():
self.__proxy__.remove_column(colid)
return self
def remove_columns(self, column_names):
"""
Remove one or more columns from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> sf.remove_columns(['val1', 'val2'])
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
for colid in reversed(deletion_indices):
with cython_context():
self.__proxy__.remove_column(colid)
return self
def swap_columns(self, column_1, column_2):
"""
Swap the columns with the given names. This operation modifies the
current SFrame in place and returns self.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.swap_columns('id', 'val')
>>> sf
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_1)
colid_2 = colnames.index(column_2)
with cython_context():
self.__proxy__.swap_columns(colid_1, colid_2)
return self
def rename(self, names):
"""
Rename the given columns. ``names`` is expected to be a dict specifying
the old and new names. This changes the names of the columns given as
the keys and replaces them with the names given as the values. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> sf.rename({'X1': 'name', 'X2':'address'})
>>> sf
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
with cython_context():
for k in names:
colid = self.column_names().index(k)
self.__proxy__.set_column_name(colid, names[k])
return self
def __getitem__(self, key):
"""
This method does things based on the type of `key`.
If `key` is:
* str
Calls `select_column` on `key`
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
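A few illustrative uses (the data is made up for this docstring):
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf['val']           # SArray holding the 'val' column
>>> sf[['id', 'val']]   # SFrame with the two named columns
>>> sf[0]               # first row as a dict, e.g. {'id': 1, 'val': 'A'}
>>> sf[sf['id'] > 1]    # rows where the boolean SArray is non-zero
>>> sf[1:3]             # rows 1 and 2 as a new SFrame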
"""
if type(key) is SArray:
return self._row_selector(key)
elif type(key) is list:
return self.select_columns(key)
elif type(key) is str:
return self.select_column(key)
elif type(key) is int:
if key < 0:
key = len(self) + key
if key >= len(self):
raise IndexError("SFrame index out of range")
return list(SFrame(_proxy = self.__proxy__.copy_range(key, 1, key+1)))[0]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
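Illustrative uses (assumes ``sf`` already has an integer column 'id'):
>>> sf['doubled'] = sf['id'] * 2   # new column from an SArray expression
>>> sf['flag'] = 1                 # constant column: every row gets 1
>>> sf['id'] = sf['id'] + 10       # replace an existing column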
"""
if type(key) is list:
self.add_columns(value, key)
elif type(key) is str:
sa_value = None
if (type(value) is SArray):
sa_value = value
elif hasattr(value, '__iter__'): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if not key in self.column_names():
with cython_context():
self.add_column(sa_value, key)
else:
# special case when replacing the only column.
# The server rejects a replacement whose length differs from the
# current column's, which doesn't make sense when we are replacing the
# only column. To support this, we first remove the only column and
# put it back if an exception happens.
single_column = (self.num_cols() == 1)
if (single_column):
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key)
else:
# add the column to a unique column name.
tmpname = '__' + '-'.join(self.column_names())
try:
self.add_column(sa_value, tmpname)
except Exception as e:
if (single_column):
self.add_column(saved_column, key)
raise
if (not single_column):
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname)
self.remove_column(key)
self.rename({tmpname: key})
else:
raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key)
def __materialize__(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
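Typical (illustrative) use after a chain of lazy operations:
>>> sf2 = sf[sf['id'] > 0]   # lazily evaluated selection
>>> sf2.__materialize__()    # force evaluation and persist the result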
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
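Each iteration yields one row as a dict mapping column names to values,
for example (illustrative):
>>> for row in sf:
...     print row['id']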
"""
_mt._get_metric_tracker().track('sframe.__iter__')
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while(True):
for j in ret:
yield dict(zip(column_names, j))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = graphlab.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.append')
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow the column order of the two sframes to differ, so we build a
# new sframe from "other" so that its columns line up with ours
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name)
# check column type
if my_column_types[i] != other_column.dtype():
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
with cython_context():
processed_other_frame.__materialize__()
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
"""
Perform a group on the key_columns followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
Parameters
----------
key_columns : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is a
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import graphlab.aggregate as agg
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_columns='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_columns='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie_id')})
Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable length list of aggregation
specifiers so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_columns is a list
if isinstance(key_columns, str):
key_columns = [key_columns]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_columns:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column " + column + " does not exist in SFrame")
if self[column].dtype() == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not(isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for (col,output) in zip(column[0],key):
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [key]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition of output column: " + key)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op != graphlab.aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
_mt._get_metric_tracker().track('sframe.groupby', properties={'operator':op})
with cython_context():
return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns,
group_output_columns, group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.join', properties={'type':how})
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.filter_by')
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not hasattr(values, '__iter__'):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
@_check_canvas_enabled
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
columns : list of str, optional
The columns of this SFrame to show in the SFrame view. In an
interactive browser target of Canvas, the columns will be selectable
and reorderable through the UI as well. If not specified, the
SFrame view will use all columns of the SFrame.
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a column filter (applied to all SFrame views):
>>> sf.show(columns=["Foo", "Bar"]) # use only columns 'Foo' and 'Bar'
>>> sf.show(columns=sf.column_names()[3:7]) # use columns 3-7
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sframe
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sframe.SFrameView(self, params={
'view': view,
'columns': columns,
'x': x,
'y': y
}))
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack two or more columns of the current SFrame into a single
column. The result is a new SFrame with the unaffected columns from the
original SFrame plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
A list of column names to be packed. At least two columns are
required. If omitted and `column_prefix` is not
specified, all columns from current SFrame are packed. This
parameter is mutually exclusive with the `column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- There must be at least two columns to pack.
- When packing to a dictionary, missing keys are always dropped. Missing
values are dropped unless `fill_na` is provided, in which case they are
replaced by the `fill_na` value. When packing to a list or array, missing
values are kept as-is unless `fill_na` is provided, in which case they
are replaced by the `fill_na` value.
Examples
--------
        Suppose 'sf' is an SFrame that maintains business category
        information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
        >>> sf.pack_columns(column_prefix='category', dtype=dict,
        ...                 remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
        >>> sf.pack_columns(columns = ['business', 'category.retail',
        ...                            'category.food', 'category.service',
        ...                            'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns != None and column_prefix != None:
raise ValueError("'columns' and 'column_prefix' parameter cannot be given at the same time.")
if new_column_name == None and column_prefix != None:
new_column_name = column_prefix
if column_prefix != None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
raise ValueError("There is no column starts with prefix '" + column_prefix + "'")
elif columns == None:
columns = self.column_names()
else:
if not hasattr(columns, '__iter__'):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
raise ValueError("There is duplicate column names in columns parameter")
if (len(columns) <= 1):
raise ValueError("Please provide at least two columns to pack")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name != None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError("Current SFrame already contains a column name " + new_column_name)
else:
new_column_name = ""
_mt._get_metric_tracker().track('sframe.pack_columns')
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys, dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
        For more details regarding name generation and other behavior, refer
        to :py:func:`graphlab.SArray.split_datetime()`.
Parameters
----------
expand_column : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit : list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns of the original
            SFrame, with the given column replaced by a collection of expanded
            columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.split_datetime')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
        For more details regarding name generation, missing value handling,
        and other behavior, refer to the SArray version of
        :py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns of the original
            SFrame, with the given column replaced by a collection of unpacked
            columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
        [3 rows x 2 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.unpack')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def stack(self, column_name, new_column_name=None, drop_na=False):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
        If the column is array or list type, one new column is created as a
        result of stacking. Each row holds one element of the array or list
        value, with the rest of the columns from the same original row
        repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
            The new column name(s). If the original column is of list/array
            type, new_column_name must be a string. If the original column is
            of dict type, new_column_name must be a list of two strings. If
            not given, column names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
        Observe that since topic 4 had no words, an empty row is inserted.
        To drop that row, set drop_na=True in the parameters to stack.
        Suppose 'sf' is an SFrame that contains a user and his/her friends,
        where the 'friends' column is of list type. Stacking the 'friends'
        column creates one row for each user/friend pair:
        >>> sf = graphlab.SFrame({'user':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
        +------+------------------+
        | user |     friends      |
        +------+------------------+
        |  1   |    [2, 3, 4]     |
        |  2   |      [5, 6]      |
        |  3   | [4, 5, 10, None] |
        +------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+------+--------+
| user | friend |
+------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there is not enough rows to infer value")
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
_mt._get_metric_tracker().track('sframe.stack')
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na))
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
_mt._get_metric_tracker().track('sframe.unstack')
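        # As documented above, unstack is implemented as a groupby over all of
        # the remaining columns with a CONCAT aggregator on the collapsed
        # column(s).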
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : graphlab.aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name:graphlab.aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column[0], column[1]))
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(),{})
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
        Only columns of type str, int, float, and datetime can be sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `sort_columns` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
        Suppose 'sf' is an SFrame that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
raise ValueError("sort_columns element are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
            if (self[column].dtype() not in (str, int, float, datetime.datetime)):
                raise TypeError("Only columns of type (str, int, float, datetime) can be sorted")
_mt._get_metric_tracker().track('sframe.sort')
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
        >>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
        >>> sf.dropna('a', how="all")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna')
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna_split')
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
# in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
The new SFrame with a column name
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.add_row_number')
if type(column_name) is not str:
raise TypeError("Must give column_name as strs")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_cols())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._proxy = value
| agpl-3.0 |
kmkolasinski/Quantulaba | plots/plot_lattice.py | 2 | 1492 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import csv
from matplotlib.collections import LineCollection
file = "lattice.dat"
#ax = plt.gca(projection='3d')
pscale=1.0
lscale=10.0
fig, ax = plt.subplots()
ax.set_aspect('equal')
desired=[1,2]
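# Rows 1 and 2 of lattice.dat (0-based) hold the two corners of the bounding
# box; they are read below to fix the plot extent before drawing.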
with open(file, 'r') as fin:
reader=csv.reader(fin)
result=[[(s) for s in row] for i,row in enumerate(reader) if i in desired]
minCorner = map(float,result[0][0].split())
maxCorner = map(float,result[1][0].split())
xWidth = abs(minCorner[0]-maxCorner[0])
yWidth = abs(minCorner[1]-maxCorner[1])
zWidth = abs(minCorner[2]-maxCorner[2])
ax.scatter(minCorner[0],minCorner[1],s=0)
ax.scatter(maxCorner[0],maxCorner[1],s=0)
ax.margins(0.1)
data = np.loadtxt(file,skiprows=4)
no_lines = np.size(data[:,0])
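# Each remaining row describes one bond: columns 0-2 and 3-5 hold the two end
# points (only x and y are used here) and column 6 is a weight used below for
# the line widths and site markers.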
wlist = []
lines = []
for i in range(no_lines):
lines.append([ (data[i,0],data[i,1]) , (data[i,3],data[i,4]) ])
wlist.extend([data[i,6]*lscale])
lc = LineCollection(lines, linewidths=wlist,colors='black',lw=1.0)
ax.add_collection(lc)
wlist = []
points = []
for i in range(no_lines):
if(data[i,6] > 1.0):
points.append([data[i,0],data[i,1] ])
wlist.extend([data[i,6]*pscale])
points = np.array(points)
wlist = np.array(wlist)
if(np.size(points) > 0):
ax.scatter(points[:,0],points[:,1], cmap='PuBu', c=wlist , s=50 , edgecolors='k' , zorder=2 )
plt.savefig("lattice.pdf") | mit |
thorwhalen/ut | ml/stream/sequences.py | 1 | 6137 |
from sklearn.base import BaseEstimator
from collections import Counter
import pandas as pd
from numpy import sum, nan, isnan
from ut.util.uiter import window
class NextElementPredictor(BaseEstimator):
def predict(self, seqs):
preds = self.predict_proba(seqs)
return [max(pred, key=lambda key: pred[key]) for pred in preds]
def predict_proba(self, seqs):
return list(map(self._predict_proba_conditioned_on_recent_subseq, seqs))
def _predict_proba_conditioned_on_recent_subseq(self, recent_subseq):
raise NotImplementedError("Need to implement this method")
class MarkovNextElementPred(NextElementPredictor):
_list_of_attributes_to_display = ['markov_window', 'empty_element', 'keep_stats_in_memory']
def __init__(self, markov_window=2, empty_element=-1, keep_stats_in_memory=True):
self.markov_window = markov_window
self.keep_stats_in_memory = keep_stats_in_memory
self.empty_element = empty_element
self._empty_element_padding = [empty_element] * (self.markov_window - 1)
@property
def total_tuple_count(self):
"""
:return: Number of observed window tuples (sum of values in self.snip_tuples_counter_)
"""
if self.total_tuple_count_ is not None:
return self.total_tuple_count_
else:
total_tuple_count_ = sum(self.snip_tuples_counter_.values())
if self.keep_stats_in_memory:
self.total_tuple_count_ = total_tuple_count_
return total_tuple_count_
@property
def pair_prob(self):
"""
:return: Series of probabilities (unsmoothed count ratios) indexed by snip pairs
"""
if self.pair_prob_ is not None:
return self.pair_prob_
else:
pair_prob_ = pd.Series(self.snip_tuples_counter_) / self.total_tuple_count
if self.keep_stats_in_memory:
                self.pair_prob_ = pair_prob_
return pair_prob_
@property
def element_prob(self):
"""
:return: Series of snips probabilities (unsmoothed count ratios)
"""
if self.element_prob_ is not None:
return self.element_prob_
else:
element_prob_ = (self.pair_prob * self.total_tuple_count)
element_prob_ = element_prob_.groupby(level=0).sum()
element_prob_ = element_prob_.drop(labels=self.empty_element)
# element_prob_ = element_prob_.iloc[
# element_prob_.index.get_level_values(level=0) != self.empty_element]
element_prob_ /= element_prob_.sum()
if self.keep_stats_in_memory:
self.element_prob_ = element_prob_
return element_prob_
@property
def conditional_prob(self):
"""
:return: Series of probabilities of last element (level) conditional on previous ones (including empty elements)
"""
if self.conditional_prob_ is not None:
return self.conditional_prob_
else:
conditional_prob_ = self._drop_empty_elements_of_sr(self.pair_prob, levels=[self.markov_window - 1])
conditional_levels = list(range(self.markov_window - 1))
conditional_prob_ = conditional_prob_.div(
conditional_prob_.groupby(level=conditional_levels).sum(), level=0) # TODO: Only works for two levels
if self.keep_stats_in_memory:
self.conditional_prob_ = conditional_prob_
return conditional_prob_
@property
def initial_element_prob(self):
"""
:return: Series of snips probabilities (unsmoothed count ratios)
"""
if self.initial_element_prob_ is not None:
return self.initial_element_prob_
else:
initial_element_prob_ = self.pair_prob.xs(self.empty_element, level=0, drop_level=True)
initial_element_prob_ /= initial_element_prob_.sum()
if self.keep_stats_in_memory:
self.initial_element_prob_ = initial_element_prob_
return initial_element_prob_
def fit(self, snips_list):
# reset anything previously learned
self._initialize_params()
return self.partial_fit(snips_list)
def partial_fit(self, snips_list):
if not set(['snip_tuples_counter_']).issubset(list(self.__dict__.keys())):
self._initialize_params()
for snips in snips_list:
self._partial_fit_of_a_single_snips(snips)
return self
def _initialize_params(self):
"""
Initializes model params (the snip_tuples_counter_, etc.)
:return: None
"""
self.snip_tuples_counter_ = Counter()
self._reset_properties()
def _reset_properties(self):
"""
Resets some properties that depend on snip_tuples_counter_ to be computed (is used when the later changes)
These will be recomputed when requested.
:return: None
"""
self.total_tuple_count_ = None
self.pair_prob_ = None
self.element_prob_ = None
self.initial_element_prob_ = None
self.conditional_prob_ = None
def _partial_fit_of_a_single_snips(self, snips):
self._reset_properties()
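        # Pad both ends with empty elements so that the start and end of the
        # sequence contribute their own window tuples (this is what makes
        # initial_element_prob recoverable from the pair counts).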
self.snip_tuples_counter_.update(window(self._empty_element_padding + list(snips) + self._empty_element_padding,
n=self.markov_window))
def _drop_empty_elements_of_sr(self, sr, levels=None, renormalize=False):
if levels is None:
levels = list(range(self.markov_window))
for level in levels:
sr = sr.drop(labels=self.empty_element, level=level)
if renormalize:
sr /= sr.sum()
return sr
def _predict_proba_conditioned_on_recent_subseq(self, recent_subseq):
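        # Stub in the original module: predict() / predict_proba() will not
        # work until this is implemented (e.g. via a lookup into
        # self.conditional_prob for the most recent context).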
pass
def __repr__(self):
d = {attr: getattr(self, attr) for attr in self._list_of_attributes_to_display if attr in self.__dict__}
d['total_tuple_count'] = self.total_tuple_count
return self.__class__.__name__ + '\n' + str(d)
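# Minimal usage sketch (illustrative only, not part of the original module);
# the sequences below are hypothetical integer-encoded snips:
#
#   model = MarkovNextElementPred(markov_window=2)
#   model.fit([[1, 2, 3, 1, 2], [2, 3, 1, 1]])
#   model.initial_element_prob   # P(first element of a sequence)
#   model.conditional_prob       # P(next element | previous elements)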
| mit |
jmschrei/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
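        # Reference implementation of the Passive-Aggressive update: compute
        # the hinge / epsilon-insensitive loss for each sample, then step by
        #   PA-I : min(C, loss / ||x||^2)            (hinge, epsilon_insensitive)
        #   PA-II: loss / (||x||^2 + 1 / (2 * C))    (squared_* losses)
        # in the direction that drives the loss towards zero.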
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/build_app.py | 9 | 77527 | """
Mac OS X .app build command for distutils
Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller.
"""
from __future__ import print_function
import imp
import sys
import os
import zipfile
import plistlib
import shlex
import shutil
import textwrap
import pkg_resources
import collections
from modulegraph import modulegraph
from py2app.apptemplate.setup import main as script_executable
from py2app.util import mergecopy, make_exec
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules
from modulegraph.modulegraph import SourceModule, Package, Script
from modulegraph import zipio
import macholib.dyld
import macholib.MachOStandalone
import macholib.MachO
from macholib.util import flipwritable
from py2app.create_appbundle import create_appbundle
from py2app.create_pluginbundle import create_pluginbundle
from py2app.util import \
fancy_split, byte_compile, make_loader, imp_find_module, \
copy_tree, fsencoding, strip_files, in_system_path, makedirs, \
iter_platform_files, find_version, skipscm, momc, copy_file, \
copy_resource
from py2app.filters import \
not_stdlib_filter, not_system_filter, has_filename_filter
from py2app import recipes
from distutils.sysconfig import get_config_var, get_config_h_filename
PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK')
PLUGIN_SUFFIXES = {
'.qlgenerator': 'QuickLook',
'.mdimporter': 'Spotlight',
'.xpc': 'XPCServices',
'.service': 'Services',
'.prefPane': 'PreferencePanes',
'.iaplugin': 'InternetAccounts',
'.action': 'Automator',
}
try:
basestring
except NameError:
basestring = str
def rewrite_tkinter_load_commands(tkinter_path):
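    # Rewrite the Mach-O load commands of _tkinter so that it links against
    # the Tcl/Tk frameworks in /System/Library/Frameworks rather than a
    # locally installed copy of the same version.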
print("rewrite_tk", tkinter_path)
m = macholib.MachO.MachO(tkinter_path)
tcl_path = None
tk_path = None
rewrite_map = {}
for header in m.headers:
for idx, name, other in header.walkRelocatables():
if other.endswith('/Tk'):
if tk_path is not None and other != tk_path:
raise DistutilsPlatformError('_tkinter is linked to different Tk paths')
tk_path = other
elif other.endswith('/Tcl'):
if tcl_path is not None and other != tcl_path:
raise DistutilsPlatformError('_tkinter is linked to different Tcl paths')
tcl_path = other
if tcl_path is None or 'Tcl.framework' not in tcl_path:
        raise DistutilsPlatformError('_tkinter is not linked against a Tcl.framework')
if tk_path is None or 'Tk.framework' not in tk_path:
        raise DistutilsPlatformError('_tkinter is not linked against a Tk.framework')
system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current']
system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current']
if not tcl_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tcl
ver = os.path.basename(os.path.dirname(tcl_path))
if ver not in system_tcl_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System')
rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,)
if not tk_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tk
ver = os.path.basename(os.path.dirname(tk_path))
if ver not in system_tk_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System')
rewrite_map[tk_path] = '/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,)
if rewrite_map:
print("Relinking _tkinter.so to system Tcl/Tk")
rewroteAny = False
for header in m.headers:
for idx, name, other in header.walkRelocatables():
data = rewrite_map.get(other)
if data:
if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())):
rewroteAny = True
if rewroteAny:
old_mode = flipwritable(m.filename)
try:
with open(m.filename, 'rb+') as f:
for header in m.headers:
f.seek(0)
header.write(f)
f.seek(0, 2)
f.flush()
finally:
flipwritable(m.filename, old_mode)
else:
print("_tkinter already linked against system Tcl/Tk")
def get_zipfile(dist, semi_standalone=False):
if sys.version_info[0] == 3:
if semi_standalone:
return "python%d.%d/site-packages.zip"%(sys.version_info[:2])
else:
return "python%d%d.zip"%(sys.version_info[:2])
return getattr(dist, "zipfile", None) or "site-packages.zip"
def framework_copy_condition(src):
# Skip Headers, .svn, and CVS dirs
return skipscm(src) and os.path.basename(src) != 'Headers'
class PythonStandalone(macholib.MachOStandalone.MachOStandalone):
def __init__(self, appbuilder, *args, **kwargs):
super(PythonStandalone, self).__init__(*args, **kwargs)
self.appbuilder = appbuilder
def copy_dylib(self, src):
dest = os.path.join(self.dest, os.path.basename(src))
if os.path.islink(src):
dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src)))
            # Ensure that the original name also exists, avoids problems when
# the filename is used from Python (see issue #65)
#
# NOTE: The if statement checks that the target link won't
# point to itself, needed for systems like homebrew that
# store symlinks in "public" locations that point to
# files of the same name in a per-package install location.
link_dest = os.path.join(self.dest, os.path.basename(src))
if os.path.basename(link_dest) != os.path.basename(dest):
os.symlink(os.path.basename(dest), link_dest)
else:
dest = os.path.join(self.dest, os.path.basename(src))
return self.appbuilder.copy_dylib(src, dest)
def copy_framework(self, info):
destfn = self.appbuilder.copy_framework(info, self.dest)
dest = os.path.join(self.dest, info['shortname'] + '.framework')
self.pending.append((destfn, iter_platform_files(dest)))
return destfn
def iterRecipes(module=recipes):
for name in dir(module):
if name.startswith('_'):
continue
check = getattr(getattr(module, name), 'check', None)
if check is not None:
yield (name, check)
# A very loosely defined "target". We assume either a "script" or "modules"
# attribute. Some attributes will be target specific.
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
# If modules is a simple string, assume they meant list
m = self.__dict__.get("modules")
if m and isinstance(m, basestring):
self.modules = [m]
def get_dest_base(self):
dest_base = getattr(self, "dest_base", None)
if dest_base: return dest_base
script = getattr(self, "script", None)
if script:
return os.path.basename(os.path.splitext(script)[0])
modules = getattr(self, "modules", None)
assert modules, "no script, modules or dest_base specified"
return modules[0].split(".")[-1]
def validate(self):
resources = getattr(self, "resources", [])
for r_filename in resources:
if not os.path.isfile(r_filename):
raise DistutilsOptionError(
"Resource filename '%s' does not exist" % (r_filename,))
def validate_target(dist, attr, value):
res = FixupTargets(value, "script")
other = {"app": "plugin", "plugin": "app"}
if res and getattr(dist, other[attr]):
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
def FixupTargets(targets, default_attribute):
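    # Normalize the target list: plain strings become Target objects with the
    # given default attribute, dicts/objects are passed through Target(**d),
    # and every target is validated before being returned.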
if not targets:
return targets
try:
targets = eval(targets)
except:
pass
ret = []
for target_def in targets:
if isinstance(target_def, basestring):
# Create a default target object, with the string as the attribute
target = Target(**{default_attribute: target_def})
else:
d = getattr(target_def, "__dict__", target_def)
if default_attribute not in d:
raise DistutilsOptionError(
"This target class requires an attribute '%s'"
% (default_attribute,))
target = Target(**d)
target.validate()
ret.append(target)
return ret
def normalize_data_file(fn):
if isinstance(fn, basestring):
fn = convert_path(fn)
return ('', [fn])
return fn
def is_system():
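    # In a virtualenv sys.prefix points into the environment; the original
    # interpreter prefix is recorded in lib/pythonX.Y/orig-prefix.txt, so use
    # that to decide whether the real Python lives in a system location.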
prefix = sys.prefix
if os.path.exists(os.path.join(prefix, ".Python")):
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
return in_system_path(prefix)
def installation_info(version=None):
if version is None:
version = sys.version
if is_system():
return version[:3] + " (FORCED: Using vendor Python)"
else:
return version[:3]
class py2app(Command):
description = "create a Mac OS X application or plugin from Python scripts"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [
("app=", None,
"application bundle to be built"),
("plugin=", None,
"plugin bundle to be built"),
('optimize=', 'O',
"optimization level: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
("includes=", 'i',
"comma-separated list of modules to include"),
("packages=", 'p',
"comma-separated list of packages to include"),
("iconfile=", None,
"Icon file to use"),
("excludes=", 'e',
"comma-separated list of modules to exclude"),
("dylib-excludes=", 'E',
"comma-separated list of frameworks or dylibs to exclude"),
("datamodels=", None,
"xcdatamodels to be compiled and copied into Resources"),
("mappingmodels=", None,
"xcmappingmodels to be compiled and copied into Resources"),
("resources=", 'r',
"comma-separated list of additional data files and folders to include (not for code!)"),
("frameworks=", 'f',
"comma-separated list of additional frameworks and dylibs to include"),
("plist=", 'P',
"Info.plist template file, dict, or plistlib.Plist"),
("extension=", None,
"Bundle extension [default:.app for app, .plugin for plugin]"),
("graph", 'g',
"output module dependency graph"),
("xref", 'x',
"output module cross-reference as html"),
("no-strip", None,
"do not strip debug and local symbols from output"),
#("compressed", 'c',
# "create a compressed zipfile"),
("no-chdir", 'C',
"do not change to the data directory (Contents/Resources) [forced for plugins]"),
#("no-zip", 'Z',
# "do not use a zip file (XXX)"),
("semi-standalone", 's',
"depend on an existing installation of Python " + installation_info()),
("alias", 'A',
"Use an alias to current source file (for development only!)"),
("argv-emulation", 'a',
"Use argv emulation [disabled for plugins]."),
("argv-inject=", None,
"Inject some commands into the argv"),
("emulate-shell-environment", None,
"Emulate the shell environment you get in a Terminal window"),
("use-pythonpath", None,
"Allow PYTHONPATH to effect the interpreter's environment"),
("use-faulthandler", None,
"Enable the faulthandler in the generated bundle (Python 3.3 or later)"),
("verbose-interpreter", None,
"Start python in verbose mode"),
('bdist-base=', 'b',
'base directory for build library (default is build)'),
('dist-dir=', 'd',
"directory to put final built distributions in (default is dist)"),
('site-packages', None,
"include the system and user site-packages into sys.path"),
("strip", 'S',
"strip debug and local symbols from output (on by default, for compatibility)"),
("prefer-ppc", None,
"Force application to run translated on i386 (LSPrefersPPC=True)"),
('debug-modulegraph', None,
'Drop to pdb console after the module finding phase is complete'),
("debug-skip-macholib", None,
"skip macholib phase (app will not be standalone!)"),
("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"),
("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"),
("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"),
("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"),
("include-plugins=", None, "List of plugins to include"),
("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"),
("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"),
("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"),
]
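    # Flags listed in boolean_options take no argument on the command line;
    # distutils treats their mere presence as True (e.g. "python setup.py
    # py2app -A" for --alias).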
boolean_options = [
#"compressed",
"xref",
"strip",
"no-strip",
"site-packages",
"semi-standalone",
"alias",
"argv-emulation",
#"no-zip",
"use-pythonpath",
"use-faulthandler",
"verbose-interpreter",
"no-chdir",
"debug-modulegraph",
"debug-skip-macholib",
"graph",
"prefer-ppc",
"emulate-shell-environment",
"force-system-tk",
"report-missing-from-imports",
"no-report-missing-conditional-import",
]
def initialize_options (self):
self.app = None
self.plugin = None
self.bdist_base = None
self.xref = False
self.graph = False
self.no_zip = 0
self.optimize = 0
if hasattr(sys, 'flags'):
self.optimize = sys.flags.optimize
self.arch = None
self.strip = True
self.no_strip = False
self.iconfile = None
self.extension = None
self.alias = 0
self.argv_emulation = 0
self.emulate_shell_environment = 0
self.argv_inject = None
self.no_chdir = 0
self.site_packages = False
self.use_pythonpath = False
self.use_faulthandler = False
self.verbose_interpreter = False
self.includes = None
self.packages = None
self.excludes = None
self.dylib_excludes = None
self.frameworks = None
self.resources = None
self.datamodels = None
self.mappingmodels = None
self.plist = None
self.compressed = True
self.semi_standalone = is_system()
self.dist_dir = None
self.debug_skip_macholib = False
self.debug_modulegraph = False
self.prefer_ppc = False
self.filters = []
self.eggs = []
self.qt_plugins = None
self.matplotlib_backends = None
self.extra_scripts = None
self.include_plugins = None
self.force_system_tk = False
self.report_missing_from_imports = False
self.no_report_missing_conditional_import = False
def finalize_options (self):
if not self.strip:
self.no_strip = True
elif self.no_strip:
self.strip = False
self.optimize = int(self.optimize)
if self.argv_inject and isinstance(self.argv_inject, basestring):
self.argv_inject = shlex.split(self.argv_inject)
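        # fancy_split turns a comma-separated option string such as
        # --includes="foo,bar" into a list of names ("foo"/"bar" are
        # illustrative only).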
self.includes = set(fancy_split(self.includes))
self.includes.add('encodings.*')
if self.use_faulthandler:
self.includes.add('faulthandler')
#if sys.version_info[:2] >= (3, 2):
# self.includes.add('pkgutil')
# self.includes.add('imp')
self.packages = set(fancy_split(self.packages))
self.excludes = set(fancy_split(self.excludes))
self.excludes.add('readline')
# included by apptemplate
self.excludes.add('site')
if getattr(self.distribution, 'install_requires', None):
self.includes.add('pkg_resources')
self.eggs = pkg_resources.require(self.distribution.install_requires)
            # Setuptools/distribute style namespace packages use
# __import__('pkg_resources'), and that import isn't detected at the
# moment. Forcefully include pkg_resources.
self.includes.add('pkg_resources')
dylib_excludes = fancy_split(self.dylib_excludes)
self.dylib_excludes = []
for fn in dylib_excludes:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
try:
res = macholib.dyld.dyld_find(fn)
except ValueError:
res = fn
self.dylib_excludes.append(res)
self.resources = fancy_split(self.resources)
frameworks = fancy_split(self.frameworks)
self.frameworks = []
for fn in frameworks:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
res = macholib.dyld.dyld_find(fn)
while res in self.dylib_excludes:
self.dylib_excludes.remove(res)
self.frameworks.append(res)
if not self.plist:
self.plist = {}
if isinstance(self.plist, basestring):
self.plist = plistlib.Plist.fromFile(self.plist)
if isinstance(self.plist, plistlib.Dict):
self.plist = dict(self.plist.__dict__)
else:
self.plist = dict(self.plist)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('bdist_base', 'bdist_base'))
if self.semi_standalone:
self.filters.append(not_stdlib_filter)
if self.iconfile is None and 'CFBundleIconFile' not in self.plist:
# Default is the generic applet icon in the framework
iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app',
'Contents', 'Resources', 'PythonApplet.icns')
if os.path.exists(iconfile):
self.iconfile = iconfile
self.runtime_preferences = list(self.get_runtime_preferences())
self.qt_plugins = fancy_split(self.qt_plugins)
self.matplotlib_backends = fancy_split(self.matplotlib_backends)
self.extra_scripts = fancy_split(self.extra_scripts)
self.include_plugins = fancy_split(self.include_plugins)
if self.datamodels:
print("WARNING: the datamodels option is deprecated, add model files to the list of resources")
if self.mappingmodels:
print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources")
def get_default_plist(self):
# XXX - this is all single target stuff
plist = {}
target = self.targets[0]
version = self.distribution.get_version()
if version == '0.0.0':
try:
version = find_version(target.script)
except ValueError:
pass
if not isinstance(version, basestring):
raise DistutilsOptionError("Version must be a string")
if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))):
raise DistutilsOptionError("Version must be a string")
plist['CFBundleVersion'] = version
name = self.distribution.get_name()
if name == 'UNKNOWN':
base = target.get_dest_base()
name = os.path.basename(base)
plist['CFBundleName'] = name
return plist
def get_runtime(self, prefix=None, version=None):
# XXX - this is a bit of a hack!
# ideally we'd use dylib functions to figure this out
if prefix is None:
prefix = sys.prefix
if version is None:
version = sys.version
version = version[:3]
info = None
if os.path.exists(os.path.join(prefix, ".Python")):
# We're in a virtualenv environment, locate the real prefix
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
try:
fmwk = macholib.dyld.framework_find(prefix)
except ValueError:
info = None
else:
info = macholib.dyld.framework_info(fmwk)
if info is not None:
dylib = info['name']
runtime = os.path.join(info['location'], info['name'])
else:
dylib = 'libpython%s.dylib' % (sys.version[:3],)
runtime = os.path.join(prefix, 'lib', dylib)
return dylib, runtime
def symlink(self, src, dst):
try:
os.remove(dst)
except OSError:
pass
os.symlink(src, dst)
def get_runtime_preferences(self, prefix=None, version=None):
dylib, runtime = self.get_runtime(prefix=prefix, version=version)
yield os.path.join('@executable_path', '..', 'Frameworks', dylib)
if self.semi_standalone or self.alias:
yield runtime
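    # run() is the distutils entry point: it sanity-checks the interpreter,
    # fetches build-time eggs, runs the regular "build" command, prepares the
    # output directories and Info.plist, extends sys.path with the build
    # products, and finally dispatches to run_alias()/run_normal() via _run().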
def run(self):
if get_config_var('PYTHONFRAMEWORK') is None:
if not get_config_var('Py_ENABLE_SHARED'):
raise DistutilsPlatformError("This python does not have a shared library or framework")
else:
# Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared
# library build did not read the application environment correctly. The collection of
# if statements below gives a clean error message when py2app is started, instead of
# building a bundle that will give a confusing error message when started.
msg = "py2app is not supported for a shared library build with this version of python"
                # require at least 2.7.4, 3.2.3 or 3.3.1 for shared-library builds
                if sys.version_info[:2] < (2,7):
                    raise DistutilsPlatformError(msg)
                elif sys.version_info[:2] == (2,7) and sys.version_info[2] < 4:
                    raise DistutilsPlatformError(msg)
                elif sys.version_info[0] == 3 and sys.version_info[1] < 2:
                    raise DistutilsPlatformError(msg)
                elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[2] < 3:
                    raise DistutilsPlatformError(msg)
                elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[2] < 1:
                    raise DistutilsPlatformError(msg)
if hasattr(self.distribution, "install_requires") \
and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
build = self.reinitialize_command('build')
build.build_base = self.bdist_base
build.run()
self.create_directories()
self.fixup_distribution()
self.initialize_plist()
sys_old_path = sys.path[:]
extra_paths = [
os.path.dirname(target.script)
for target in self.targets
]
extra_paths.extend([build.build_platlib, build.build_lib])
self.additional_paths = [
os.path.abspath(p)
for p in extra_paths
if p is not None
]
sys.path[:0] = self.additional_paths
# this needs additional_paths
self.initialize_prescripts()
try:
self._run()
finally:
sys.path = sys_old_path
def iter_datamodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcdatamodel':
basefn = fn
fn += '.xcdatamodel'
destfn = os.path.basename(basefn) + '.mom'
yield fn, os.path.join(resdir, path, destfn)
def compile_datamodels(self, resdir):
for src, dest in self.iter_datamodels(resdir):
print("compile datamodel", src, "->", dest)
self.mkpath(os.path.dirname(dest))
momc(src, dest)
def iter_mappingmodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcmappingmodel':
basefn = fn
fn += '.xcmappingmodel'
destfn = os.path.basename(basefn) + '.cdm'
yield fn, os.path.join(resdir, path, destfn)
def compile_mappingmodels(self, resdir):
for src, dest in self.iter_mappingmodels(resdir):
self.mkpath(os.path.dirname(dest))
mapc(src, dest)
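    # PLUGIN_SUFFIXES maps a plugin's filename extension to the subdirectory of
    # Contents/Library it should be installed into; include_plugins entries may
    # either rely on that mapping or spell out a (subdir, path) pair explicitly.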
def iter_extra_plugins(self):
for item in self.include_plugins:
if isinstance(item, (list, tuple)):
subdir, path = item
else:
ext = os.path.splitext(item)[1]
try:
subdir = PLUGIN_SUFFIXES[ext]
path = item
except KeyError:
raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,))
yield path, os.path.join(subdir, os.path.basename(path))
def iter_data_files(self):
dist = self.distribution
allres = chain(getattr(dist, 'data_files', ()) or (), self.resources)
for (path, files) in (normalize_data_file(fn) for fn in allres):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
yield fn, os.path.join(path, os.path.basename(fn))
def collect_scripts(self):
        # these contain file names
scripts = set()
for target in self.targets:
scripts.add(target.script)
scripts.update([
k for k in target.prescripts if isinstance(k, basestring)
])
if hasattr(target, 'extra_scripts'):
scripts.update(target.extra_scripts)
scripts.update(self.extra_scripts)
return scripts
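    # The dict returned here is merged into the bundle's Info.plist (see
    # initialize_plist) under a top-level "PyOptions" key, so the launch-time
    # bootstrap code can pick the options up from the plist.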
def get_plist_options(self):
result = dict(
PyOptions=dict(
use_pythonpath=bool(self.use_pythonpath),
site_packages=bool(self.site_packages),
alias=bool(self.alias),
argv_emulation=bool(self.argv_emulation),
emulate_shell_environment=bool(self.emulate_shell_environment),
no_chdir=bool(self.no_chdir),
prefer_ppc=self.prefer_ppc,
verbose=self.verbose_interpreter,
use_faulthandler=self.use_faulthandler,
),
)
if self.optimize:
result['PyOptions']['optimize'] = self.optimize
return result
def initialize_plist(self):
plist = self.get_default_plist()
for target in self.targets:
plist.update(getattr(target, 'plist', {}))
plist.update(self.plist)
plist.update(self.get_plist_options())
if self.iconfile:
iconfile = self.iconfile
if not os.path.exists(iconfile):
iconfile = iconfile + '.icns'
if not os.path.exists(iconfile):
raise DistutilsOptionError("icon file must exist: %r"
% (self.iconfile,))
self.resources.append(iconfile)
plist['CFBundleIconFile'] = os.path.basename(iconfile)
if self.prefer_ppc:
plist['LSPrefersPPC'] = True
self.plist = plist
return plist
def run_alias(self):
self.app_files = []
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
                extra_scripts.extend(target.extra_scripts)
dst = self.build_alias_executable(target, target.script, extra_scripts)
self.app_files.append(dst)
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
def collect_recipedict(self):
return dict(iterRecipes())
def get_modulefinder(self):
if self.debug_modulegraph:
debug = 4
else:
debug = 0
return find_modules(
scripts=self.collect_scripts(),
includes=self.includes,
packages=self.packages,
excludes=self.excludes,
debug=debug,
)
def collect_filters(self):
return [has_filename_filter] + list(self.filters)
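    # Recipes come from collect_recipedict(); each is a callable check(cmd, mf)
    # that returns None when it does not apply, or a dict whose optional
    # 'packages', 'includes', 'flatpackages', 'filters', 'loader_files',
    # 'resources' and 'prescripts' entries are merged into the build below.
    # The loop restarts after every hit because a recipe can pull in new
    # modules that make other recipes applicable.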
def process_recipes(self, mf, filters, flatpackages, loader_files):
rdict = self.collect_recipedict()
while True:
for name, check in rdict.items():
rval = check(self, mf)
if rval is None:
continue
# we can pull this off so long as we stop the iter
del rdict[name]
print('*** using recipe: %s ***' % (name,))
if rval.get('packages'):
self.packages.update(rval['packages'])
find_needed_modules(mf, packages=rval['packages'])
for pkg in rval.get('flatpackages', ()):
if isinstance(pkg, basestring):
pkg = (os.path.basename(pkg), pkg)
flatpackages[pkg[0]] = pkg[1]
filters.extend(rval.get('filters', ()))
loader_files.extend(rval.get('loader_files', ()))
newbootstraps = list(map(self.get_bootstrap,
rval.get('prescripts', ())))
if rval.get('includes'):
find_needed_modules(mf, includes=rval['includes'])
if rval.get('resources'):
self.resources.extend(rval['resources'])
for fn in newbootstraps:
if isinstance(fn, basestring):
mf.run_script(fn)
for target in self.targets:
target.prescripts.extend(newbootstraps)
break
else:
break
def _run(self):
try:
if self.alias:
self.run_alias()
else:
self.run_normal()
except:
raise
# XXX - remove when not debugging
# distutils sucks
import pdb, sys, traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
print("Done!")
def filter_dependencies(self, mf, filters):
print("*** filtering dependencies ***")
nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters)
print('%d total' % (nodes_seen,))
print('%d filtered' % (nodes_removed,))
print('%d orphaned' % (nodes_orphaned,))
print('%d remaining' % (nodes_seen - nodes_removed,))
def get_appname(self):
return self.plist['CFBundleName']
def build_xref(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.html')
print("*** creating dependency html: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.create_xref(fp)
def build_graph(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.dot')
print("*** creating dependency graph: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.graphreport(fp, flatpackages=flatpackages)
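    # A Package node whose filename is '-' has no real __init__ on disk
    # (typically a setuptools-style namespace package); on Python <= 3.3 an
    # empty placeholder __init__.py is materialised in the temp dir so the
    # package can still be copied into the bundle.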
def finalize_modulefinder(self, mf):
for item in mf.flatten():
if isinstance(item, Package) and item.filename == '-':
if sys.version_info[:2] <= (3,3):
fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py')
if not os.path.exists(fn):
dn = os.path.dirname(fn)
if not os.path.exists(dn):
os.makedirs(dn)
with open(fn, 'w') as fp:
pass
item.filename = fn
py_files, extensions = parse_mf_results(mf)
# Remove all top-level scripts from the list of python files,
# those get treated differently.
py_files = [ item for item in py_files if not isinstance(item, Script) ]
extensions = list(extensions)
return py_files, extensions
def collect_packagedirs(self):
return list(filter(os.path.exists, [
os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '')
for pkg in self.packages
]))
def run_normal(self):
mf = self.get_modulefinder()
filters = self.collect_filters()
flatpackages = {}
loader_files = []
self.process_recipes(mf, filters, flatpackages, loader_files)
if self.debug_modulegraph:
import pdb
pdb.Pdb().set_trace()
self.filter_dependencies(mf, filters)
if self.graph:
self.build_graph(mf, flatpackages)
if self.xref:
self.build_xref(mf, flatpackages)
py_files, extensions = self.finalize_modulefinder(mf)
pkgdirs = self.collect_packagedirs()
self.create_binaries(py_files, pkgdirs, extensions, loader_files)
missing = []
syntax_error = []
invalid_bytecode = []
for module in mf.nodes():
if isinstance(module, modulegraph.MissingModule):
if module.identifier != '__main__':
missing.append(module)
elif isinstance(module, modulegraph.InvalidSourceModule):
syntax_error.append(module)
elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule):
invalid_bytecode.append(module)
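        # Classify every missing module by how it was imported (unconditional,
        # conditional, plain import vs. "from ... import name") so the warnings
        # below can be filtered by the report-* options.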
if missing:
missing_unconditional = collections.defaultdict(set)
missing_fromimport = collections.defaultdict(set)
missing_fromimport_conditional = collections.defaultdict(set)
missing_conditional = collections.defaultdict(set)
for module in sorted(missing):
for m in mf.getReferers(module):
if m is None: continue # XXX
try:
ed = mf.edgeData(m, module)
except KeyError:
ed = None
if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo):
c = missing_unconditional
if ed.conditional or ed.function:
if ed.fromlist:
c = missing_fromimport_conditional
else:
c = missing_conditional
elif ed.fromlist:
c = missing_fromimport
c[module.identifier].add(m.identifier)
else:
missing_unconditional[module.identifier].add(m.identifier)
if missing_unconditional:
log.warn("Modules not found (unconditional imports):")
for m in sorted(missing_unconditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m]))))
log.warn("")
if missing_conditional and not self.no_report_missing_conditional_import:
log.warn("Modules not found (conditional imports):")
for m in sorted(missing_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m]))))
log.warn("")
if self.report_missing_from_imports and (
missing_fromimport or (
not self.no_report_missing_conditional_import and missing_fromimport_conditional)):
log.warn("Modules not found ('from ... import y'):")
for m in sorted(missing_fromimport):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m]))))
if not self.no_report_missing_conditional_import and missing_fromimport_conditional:
log.warn("")
log.warn("Conditional:")
for m in sorted(missing_fromimport_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m]))))
log.warn("")
if syntax_error:
log.warn("Modules with syntax errors:")
for module in sorted(syntax_error):
log.warn(" * %s"%(module.identifier))
log.warn("")
if invalid_bytecode:
log.warn("Modules with invalid bytecode:")
for module in sorted(invalid_bytecode):
log.warn(" * %s"%(module.identifier))
log.warn("")
def create_directories(self):
bdist_base = self.bdist_base
if self.semi_standalone:
self.bdist_dir = os.path.join(bdist_base,
'python%s-semi_standalone' % (sys.version[:3],), 'app')
else:
self.bdist_dir = os.path.join(bdist_base,
'python%s-standalone' % (sys.version[:3],), 'app')
if os.path.exists(self.bdist_dir):
shutil.rmtree(self.bdist_dir)
self.collect_dir = os.path.abspath(
os.path.join(self.bdist_dir, "collect"))
self.mkpath(self.collect_dir)
self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp"))
self.mkpath(self.temp_dir)
self.dist_dir = os.path.abspath(self.dist_dir)
self.mkpath(self.dist_dir)
self.lib_dir = os.path.join(self.bdist_dir,
os.path.dirname(get_zipfile(self.distribution, self.semi_standalone)))
self.mkpath(self.lib_dir)
self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload')
self.mkpath(self.ext_dir)
self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks')
self.mkpath(self.framework_dir)
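    # create_binaries() byte-compiles the collected modules, packs them into
    # the shared zip archive, copies extension modules and frameworks, builds
    # an executable per target and (unless --debug-skip-macholib) rewrites the
    # Mach-O load commands so the bundle is self-contained.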
def create_binaries(self, py_files, pkgdirs, extensions, loader_files):
print("*** create binaries ***")
dist = self.distribution
pkgexts = []
copyexts = []
extmap = {}
def packagefilter(mod, pkgdirs=pkgdirs):
fn = os.path.realpath(getattr(mod, 'filename', None))
if fn is None:
return None
for pkgdir in pkgdirs:
if fn.startswith(pkgdir):
return None
return fn
if pkgdirs:
py_files = list(filter(packagefilter, py_files))
for ext in extensions:
fn = packagefilter(ext)
if fn is None:
fn = os.path.realpath(getattr(ext, 'filename', None))
pkgexts.append(ext)
else:
if '.' in ext.identifier:
py_files.append(self.create_loader(ext))
copyexts.append(ext)
extmap[fn] = ext
# byte compile the python modules into the target directory
print("*** byte compile python files ***")
byte_compile(py_files,
target_dir=self.collect_dir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
for item in py_files:
if not isinstance(item, Package): continue
self.copy_package_data(item, self.collect_dir)
self.lib_files = []
self.app_files = []
# create the shared zipfile containing all Python modules
archive_name = os.path.join(self.lib_dir,
get_zipfile(dist, self.semi_standalone))
for path, files in loader_files:
dest = os.path.join(self.collect_dir, path)
self.mkpath(dest)
for fn in files:
destfn = os.path.join(dest, os.path.basename(fn))
if os.path.isdir(fn):
self.copy_tree(fn, destfn, preserve_symlinks=False)
else:
self.copy_file(fn, destfn)
arcname = self.make_lib_archive(archive_name,
base_dir=self.collect_dir, verbose=self.verbose,
dry_run=self.dry_run)
# XXX: this doesn't work with python3
#self.lib_files.append(arcname)
# build the executables
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_executable(
target, arcname, pkgexts, copyexts, target.script, extra_scripts)
exp = os.path.join(dst, 'Contents', 'MacOS')
execdst = os.path.join(exp, 'python')
if self.semi_standalone:
self.symlink(sys.executable, execdst)
else:
if os.path.exists(os.path.join(sys.prefix, ".Python")):
fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:]
if rest_path.startswith('.'):
rest_path = rest_path[1:]
if PYTHONFRAMEWORK:
# When we're using a python framework bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(os.path.join(prefix, rest_path), execdst)
else:
if PYTHONFRAMEWORK:
# When we're using a python framework bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(sys.executable, execdst)
if not self.debug_skip_macholib:
if self.force_system_tk:
print("force system tk")
resdir = os.path.join(dst, 'Contents', 'Resources')
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
tkinter_path = os.path.join(ext_dir, '_tkinter.so')
if os.path.exists(tkinter_path):
rewrite_tkinter_load_commands(tkinter_path)
else:
print("tkinter not found at", tkinter_path)
mm = PythonStandalone(self, dst, executable_path=exp)
dylib, runtime = self.get_runtime()
if self.semi_standalone:
mm.excludes.append(runtime)
else:
mm.mm.run_file(runtime)
for exclude in self.dylib_excludes:
info = macholib.dyld.framework_info(exclude)
if info is not None:
exclude = os.path.join(
info['location'], info['shortname'] + '.framework')
mm.excludes.append(exclude)
for fmwk in self.frameworks:
mm.mm.run_file(fmwk)
platfiles = mm.run()
if self.strip:
platfiles = self.strip_dsym(platfiles)
self.strip_files(platfiles)
self.app_files.append(dst)
def copy_package_data(self, package, target_dir):
"""
Copy any package data in a python package into the target_dir.
        This is a bit of a hack; it would be better to identify python eggs
and copy those in whole.
"""
exts = [ i[0] for i in imp.get_suffixes() ]
exts.append('.py')
exts.append('.pyc')
exts.append('.pyo')
def datafilter(item):
for e in exts:
if item.endswith(e):
return False
return True
target_dir = os.path.join(target_dir, *(package.identifier.split('.')))
for dname in package.packagepath:
filenames = list(filter(datafilter, zipio.listdir(dname)))
for fname in filenames:
if fname in ('.svn', 'CVS', '.hg', '.git'):
# Scrub revision manager junk
continue
if fname in ('__pycache__',):
# Ignore PEP 3147 bytecode cache
continue
if fname.startswith('.') and fname.endswith('.swp'):
# Ignore vim(1) temporary files
continue
if fname.endswith('~') or fname.endswith('.orig'):
# Ignore backup files for common tools (hg, emacs, ...)
continue
pth = os.path.join(dname, fname)
# Check if we have found a package, exclude those
if zipio.isdir(pth):
# XXX: the 'and not' part is wrong, need to fix zipio.isdir
for p in zipio.listdir(pth):
if p.startswith('__init__.') and p[8:] in exts:
break
else:
if os.path.isfile(pth):
# Avoid extracting a resource file that happens
                            # to be a zipfile.
# XXX: Need API in zipio for nicer code.
copy_file(pth, os.path.join(target_dir, fname))
else:
copy_tree(pth, os.path.join(target_dir, fname))
continue
elif zipio.isdir(pth) and (
zipio.isfile(os.path.join(pth, '__init__.py'))
or zipio.isfile(os.path.join(pth, '__init__.pyc'))
or zipio.isfile(os.path.join(pth, '__init__.pyo'))):
# Subdirectory is a python package, these will get included later on
# when the subpackage itself is included, ignore for now.
pass
else:
copy_file(pth, os.path.join(target_dir, fname))
def strip_dsym(self, platfiles):
""" Remove .dSYM directories in the bundled application """
#
        # .dSYM directories contain detached debugging information and
# should be completely removed when the "strip" option is specified.
#
if self.dry_run:
return platfiles
for dirpath, dnames, fnames in os.walk(self.appdir):
for nm in list(dnames):
if nm.endswith('.dSYM'):
print("removing debug info: %s/%s"%(dirpath, nm))
shutil.rmtree(os.path.join(dirpath, nm))
dnames.remove(nm)
return [file for file in platfiles if '.dSYM' not in file]
def strip_files(self, files):
unstripped = 0
stripfiles = []
for fn in files:
unstripped += os.stat(fn).st_size
stripfiles.append(fn)
log.info('stripping %s', os.path.basename(fn))
strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose)
stripped = 0
for fn in stripfiles:
stripped += os.stat(fn).st_size
log.info('stripping saved %d bytes (%d / %d)',
unstripped - stripped, stripped, unstripped)
def copy_dylib(self, src, dst):
# will be copied from the framework?
if src != sys.executable:
force, self.force = self.force, True
self.copy_file(src, dst)
self.force = force
return dst
def copy_versioned_framework(self, info, dst):
# XXX - Boy is this ugly, but it makes sense because the developer
# could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc.
# Saves a good deal of space, and I'm pretty sure this ugly
# hack is correct in the general case.
version = info['version']
if version is None:
return self.raw_copy_framework(info, dst)
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
vsplit = os.path.join(infile, 'Versions').split(os.sep)
def condition(src, vsplit=vsplit, version=version):
srcsplit = src.split(os.sep)
if (
len(srcsplit) > len(vsplit) and
srcsplit[:len(vsplit)] == vsplit and
srcsplit[len(vsplit)] != version and
not os.path.islink(src)
):
return False
# Skip Headers, .svn, and CVS dirs
return framework_copy_condition(src)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=condition)
def copy_framework(self, info, dst):
force, self.force = self.force, True
if info['shortname'] == PYTHONFRAMEWORK:
self.copy_python_framework(info, dst)
else:
self.copy_versioned_framework(info, dst)
self.force = force
return os.path.join(dst, info['name'])
def raw_copy_framework(self, info, dst):
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=framework_copy_condition)
def copy_python_framework(self, info, dst):
# XXX - In this particular case we know exactly what we can
# get away with.. should this be extended to the general
# case? Per-framework recipes?
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
indir = os.path.dirname(os.path.join(info['location'], info['name']))
outdir = os.path.dirname(os.path.join(dst, info['name']))
self.mkpath(os.path.join(outdir, 'Resources'))
pydir = 'python%s.%s'%(sys.version_info[:2])
# Create a symlink "for Python.frameworks/Versions/Current". This
# is required for the Mac App-store.
os.symlink(
os.path.basename(outdir),
os.path.join(os.path.dirname(outdir), "Current"))
# Likewise for two links in the root of the framework:
os.symlink(
'Versions/Current/Resources',
os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources'))
os.symlink(
os.path.join('Versions/Current', PYTHONFRAMEWORK),
os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK))
# Experiment for issue 57
if not os.path.exists(os.path.join(indir, 'include')):
alt = os.path.join(indir, 'Versions/Current')
if os.path.exists(os.path.join(alt, 'include')):
indir = alt
# distutils looks for some files relative to sys.executable, which
# means they have to be in the framework...
self.mkpath(os.path.join(outdir, 'include'))
self.mkpath(os.path.join(outdir, 'include', includedir))
self.mkpath(os.path.join(outdir, 'lib'))
self.mkpath(os.path.join(outdir, 'lib', pydir))
self.mkpath(os.path.join(outdir, 'lib', pydir, configdir))
fmwkfiles = [
os.path.basename(info['name']),
'Resources/Info.plist',
'include/%s/pyconfig.h'%(includedir),
]
if '_sysconfigdata' not in sys.modules:
fmwkfiles.append(
'lib/%s/%s/Makefile'%(pydir, configdir)
)
for fn in fmwkfiles:
self.copy_file(
os.path.join(indir, fn),
os.path.join(outdir, fn))
def fixup_distribution(self):
dist = self.distribution
# Trying to obtain app and plugin from dist for backward compatibility
# reasons.
app = dist.app
plugin = dist.plugin
# If we can get suitable values from self.app and self.plugin, we prefer
# them.
if self.app is not None or self.plugin is not None:
app = self.app
plugin = self.plugin
# Convert our args into target objects.
dist.app = FixupTargets(app, "script")
dist.plugin = FixupTargets(plugin, "script")
if dist.app and dist.plugin:
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
elif dist.app:
self.style = 'app'
self.targets = dist.app
elif dist.plugin:
self.style = 'plugin'
self.targets = dist.plugin
else:
raise DistutilsOptionError(
"You must specify either app or plugin")
if len(self.targets) != 1:
# XXX - support multiple targets?
raise DistutilsOptionError(
"Multiple targets not currently supported")
if not self.extension:
self.extension = '.' + self.style
# make sure all targets use the same directory, this is
# also the directory where the pythonXX.dylib must reside
paths = set()
for target in self.targets:
paths.add(os.path.dirname(target.get_dest_base()))
if len(paths) > 1:
raise DistutilsOptionError(
"all targets must use the same directory: %s" %
([p for p in paths],))
if paths:
app_dir = paths.pop() # the only element
if os.path.isabs(app_dir):
raise DistutilsOptionError(
"app directory must be relative: %s" % (app_dir,))
self.app_dir = os.path.join(self.dist_dir, app_dir)
self.mkpath(self.app_dir)
else:
# Do we allow to specify no targets?
# We can at least build a zipfile...
self.app_dir = self.lib_dir
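    # Prescripts are small bootstrap snippets (looked up in py2app.bootstrap by
    # name, or given inline as StringIO objects) that get concatenated into the
    # generated __boot__.py ahead of the user's script.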
def initialize_prescripts(self):
prescripts = []
prescripts.append('reset_sys_path')
if self.semi_standalone:
prescripts.append('semi_standalone_path')
if 0 and sys.version_info[:2] >= (3, 2) and not self.alias:
# Python 3.2 or later requires a more complicated
# bootstrap
prescripts.append('import_encodings')
if os.path.exists(os.path.join(sys.prefix, ".Python")):
# We're in a virtualenv, which means sys.path
# will be broken in alias builds unless we fix
# it.
if self.alias or self.semi_standalone:
prescripts.append("virtualenv")
prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,)))
if self.site_packages or self.alias:
import site
global_site_packages = not os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt'))
prescripts.append('virtualenv_site_packages')
prescripts.append(StringIO('_site_packages(%r, %r, %d)' % (
sys.prefix, sys.real_prefix, global_site_packages)))
elif self.site_packages or self.alias:
prescripts.append('site_packages')
if is_system():
prescripts.append('system_path_extras')
#if self.style == 'app':
# prescripts.append('setup_pkgresource')
included_subpkg = [pkg for pkg in self.packages if '.' in pkg]
if included_subpkg:
prescripts.append('setup_included_subpackages')
prescripts.append(StringIO('_path_hooks = %r'%(
included_subpkg)))
if self.emulate_shell_environment:
prescripts.append('emulate_shell_environment')
if self.argv_emulation and self.style == 'app':
prescripts.append('argv_emulation')
if 'CFBundleDocumentTypes' not in self.plist:
self.plist['CFBundleDocumentTypes'] = [
{
'CFBundleTypeOSTypes' : [
'****',
'fold',
'disk',
],
'CFBundleTypeRole': 'Viewer'
},
]
if self.argv_inject is not None:
prescripts.append('argv_inject')
prescripts.append(
StringIO('_argv_inject(%r)\n' % (self.argv_inject,)))
if self.style == 'app' and not self.no_chdir:
prescripts.append('chdir_resource')
if not self.alias:
prescripts.append('disable_linecache')
prescripts.append('boot_' + self.style)
else:
# Add ctypes prescript because it is needed to
# find libraries in the bundle, but we don't run
# recipes and hence the ctypes recipe is not used
# for alias builds.
prescripts.append('ctypes_setup')
if self.additional_paths:
prescripts.append('path_inject')
prescripts.append(
StringIO('_path_inject(%r)\n' % (self.additional_paths,)))
prescripts.append('boot_alias' + self.style)
newprescripts = []
for s in prescripts:
if isinstance(s, basestring):
newprescripts.append(
self.get_bootstrap('py2app.bootstrap.' + s))
else:
newprescripts.append(s)
for target in self.targets:
prescripts = getattr(target, 'prescripts', [])
target.prescripts = newprescripts + prescripts
def get_bootstrap(self, bootstrap):
if isinstance(bootstrap, basestring):
if not os.path.exists(bootstrap):
bootstrap = imp_find_module(bootstrap)[1]
return bootstrap
def get_bootstrap_data(self, bootstrap):
bootstrap = self.get_bootstrap(bootstrap)
if not isinstance(bootstrap, basestring):
return bootstrap.getvalue()
else:
with open(bootstrap, 'rU') as fp:
return fp.read()
def create_pluginbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating plugin bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
appdir, plist = create_pluginbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_appbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating application bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
pythonInfo = self.plist.setdefault('PythonInfoDict', {})
        pythonInfo.setdefault('py2app', {}).update(dict(
            alias=bool(self.alias),
        ))
appdir, plist = create_appbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_bundle(self, target, script, use_runtime_preference=True):
fn = getattr(self, 'create_%sbundle' % (self.style,))
return fn(
target,
script,
use_runtime_preference=use_runtime_preference
)
def iter_frameworks(self):
for fn in self.frameworks:
fmwk = macholib.dyld.framework_info(fn)
if fmwk is None:
yield fn
else:
basename = fmwk['shortname'] + '.framework'
yield os.path.join(fmwk['location'], basename)
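    # Alias builds are meant for development only: instead of copying the
    # Python code into the bundle, __boot__.py records the absolute path of the
    # source script and the bundle symlinks the interpreter, site files, data
    # files and frameworks back into the working copy.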
def build_alias_executable(self, target, script, extra_scripts):
# Build an alias executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
# symlink python executable
execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python')
prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python')
if os.path.exists(prefixPathExecutable):
pyExecutable = prefixPathExecutable
else:
pyExecutable = sys.executable
self.symlink(pyExecutable, execdst)
# make PYTHONHOME
pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
makedirs(pyhome)
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc'))
self.symlink(
os.path.join(realhome, 'config'),
os.path.join(pyhome, 'config'))
# symlink data files
# XXX: fixme: need to integrate automatic data conversion
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run, symlink=1)
except:
import traceback
traceback.print_exc()
raise
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run)
except:
import traceback
traceback.print_exc()
raise
# symlink frameworks
for src in self.iter_frameworks():
dest = os.path.join(
appdir, 'Contents', 'Frameworks', os.path.basename(src))
if src == dest:
continue
makedirs(os.path.dirname(dest))
self.symlink(os.path.abspath(src), dest)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),))
script_map = {}
for fn in extra_scripts:
tgt = os.path.realpath(fn)
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = tgt
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = tgt
else:
script_map[fn] = tgt
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('try:\n')
bootfile.write(' _run()\n')
bootfile.write('except KeyboardInterrupt:\n')
bootfile.write(' pass\n')
bootfile.close()
target.appdir = appdir
return appdir
def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts):
# Build an executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
self.appdir = appdir
self.resdir = resdir
self.plist = plist
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
site_path = os.path.join(resdir, 'site.py')
byte_compile([
SourceModule('site', site_path),
],
target_dir=resdir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
if not self.dry_run:
os.unlink(site_path)
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),))
script_map = {}
for fn in extra_scripts:
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = fn
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = fn
else:
script_map[fn] = fn
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('_run()\n')
bootfile.close()
self.copy_file(script, resdir)
for fn in extra_scripts:
self.copy_file(fn, resdir)
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
if sys.version_info[0] == 2 or self.semi_standalone:
arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
else:
arcdir = os.path.join(resdir, 'lib')
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
self.mkpath(pydir)
        # The site.py file needs to be in two locations:
# 1) in lib/pythonX.Y, to be found during normal startup and
# by the 'python' executable
# 2) in the resources directory next to the script for
# semistandalone builds (the lib/pythonX.Y directory is too
# late on sys.path to be found in that case).
#
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc'))
cfgdir = os.path.join(pydir, configdir)
realcfg = os.path.join(realhome, configdir)
real_include = os.path.join(sys.prefix, 'include')
if self.semi_standalone:
self.symlink(realcfg, cfgdir)
self.symlink(real_include, os.path.join(resdir, 'include'))
else:
self.mkpath(cfgdir)
if '_sysconfigdata' not in sys.modules:
# Recent enough versions of Python 2.7 and 3.x have
# an _sysconfigdata module and don't need the Makefile
# to provide the sysconfig data interface. Don't copy
# them.
for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config':
rfn = os.path.join(realcfg, fn)
if os.path.exists(rfn):
self.copy_file(rfn, os.path.join(cfgdir, fn))
inc_dir = os.path.join(resdir, 'include', includedir)
self.mkpath(inc_dir)
self.copy_file(get_config_h_filename(),
os.path.join(inc_dir, 'pyconfig.h'))
self.copy_file(arcname, arcdir)
if sys.version_info[0] != 2:
import zlib
self.copy_file(zlib.__file__, os.path.dirname(arcdir))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True)
self.copy_tree(self.framework_dir,
os.path.join(appdir, 'Contents', 'Frameworks'),
preserve_symlinks=True)
for pkg_name in self.packages:
pkg = self.get_bootstrap(pkg_name)
print('XXXX', pkg_name, pkg)
if self.semi_standalone:
# For semi-standalone builds don't copy packages
# from the stdlib into the app bundle, even when
# they are mentioned in self.packages.
p = Package(pkg_name, pkg)
if not not_stdlib_filter(p):
continue
dst = os.path.join(pydir, pkg_name)
self.mkpath(dst)
self.copy_tree(pkg, dst)
# FIXME: The python files should be bytecompiled
# here (see issue 101)
for copyext in copyexts:
fn = os.path.join(ext_dir,
(copyext.identifier.replace('.', os.sep) +
os.path.splitext(copyext.filename)[1])
)
self.mkpath(os.path.dirname(fn))
copy_file(copyext.filename, fn, dry_run=self.dry_run)
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
target.appdir = appdir
return appdir
def create_loader(self, item):
# Hm, how to avoid needless recreation of this file?
slashname = item.identifier.replace('.', os.sep)
pathname = os.path.join(self.temp_dir, "%s.py" % slashname)
if os.path.exists(pathname):
if self.verbose:
print("skipping python loader for extension %r"
% (item.identifier,))
else:
self.mkpath(os.path.dirname(pathname))
# and what about dry_run?
if self.verbose:
print("creating python loader for extension %r"
% (item.identifier,))
fname = slashname + os.path.splitext(item.filename)[1]
source = make_loader(fname)
if not self.dry_run:
with open(pathname, "w") as fp:
fp.write(source)
else:
return
return SourceModule(item.identifier, pathname)
def make_lib_archive(self, zip_filename, base_dir, verbose=0,
dry_run=0):
# Like distutils "make_archive", except we can specify the
# compression to use - default is ZIP_STORED to keep the
# runtime performance up.
# Also, we don't append '.zip' to the filename.
from distutils.dir_util import mkpath
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
if self.compressed:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=compression)
save_cwd = os.getcwd()
os.chdir(base_dir)
for dirpath, dirnames, filenames in os.walk('.'):
if filenames:
# Ensure that there are directory entries for
# all directories in the zipfile. This is a
# workaround for <http://bugs.python.org/issue14905>:
# zipimport won't consider 'pkg/foo.py' to be in
# namespace package 'pkg' unless there is an
# entry for the directory (or there is a
# pkg/__init__.py file as well)
z.write(dirpath, dirpath)
for fn in filenames:
path = os.path.normpath(os.path.join(dirpath, fn))
if os.path.isfile(path):
z.write(path, path)
os.chdir(save_cwd)
z.close()
return zip_filename
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1, condition=None):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
This version doesn't bork on existing symlinks
"""
return copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run,
condition=condition)
| apache-2.0 |