repo_name (stringlengths 7-60) | path (stringlengths 6-134) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 1.04k-149k) | license (stringclasses 12 values)
---|---|---|---|---|---|
paulgradie/SeqPyPlot | main_app/seqpyplot/parsers/htseq_parser.py | 1 | 2244 | """
Read a directory of expression count files in HTSeq format. Each sample
should be an individual file in the directory. File names and sample
order are specified in the config file (sample order follows the order
listed in the config).
This class is intended to return the raw dataframe of samples, with
missing sample columns left as NaN.
"""
import pandas as pd
from pathos.multiprocessing import ProcessPool
import pathlib
try:
from functools import reduce # for py3 compatibility
except ImportError:
pass
class HtSeqParser(object):
def __init__(self, nodes=2):
self.nodes = nodes
def parse_data(self, data_paths, sample_names):
"""
Read the input files from the config file and load in to a
pandas dataframe.
params
    data_paths: list of file paths specified in the config
        (returned from config parse)
    sample_names: list of sample names specified in the config
        (returned from config parse)
"""
output = self.load_data(data_paths, sample_names)
data, ercc_df = (self.merge_dfs(output)
.pipe(self.df_cleanup)
.pipe(self.split_on_ercc))
return data, ercc_df
def load_data(self, data_paths, sample_names):
" Multiprocess load of files in to a list of dfs "
pool = ProcessPool(nodes=self.nodes)
dfs = pool.map(self.load_func, zip(data_paths, sample_names))
return dfs
@staticmethod
def load_func(data_tuple):
path, sample_name = data_tuple
return pd.read_csv(path, sep='\t', names=['gene', sample_name])
def merge_dfs(self, dfs):
return reduce(lambda x, y: pd.merge(x, y, on='gene', how='outer'), dfs)
def df_cleanup(self, df_old):
" Clean away unwanted columns, reset index, and fillna "
df = df_old.copy()
df = df[df['gene'].str.startswith('__') == False]
df.set_index('gene', inplace=True)
df.fillna(value='Nan', inplace=True)
return df
def split_on_ercc(self, df):
" Extract the ERCC data "
ercc_cols = df.index.str.startswith('ERCC-')
ercc_df = df[ercc_cols]
data = df[~ercc_cols]
return data, ercc_df
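# A minimal usage sketch; the file paths and sample names below are
# hypothetical placeholders, and in the real pipeline both lists come from
# the config parser, as described in the parse_data docstring above.
if __name__ == '__main__':
    parser = HtSeqParser(nodes=2)
    data, ercc_df = parser.parse_data(
        data_paths=['counts/ctrl_rep1.txt', 'counts/treat_rep1.txt'],
        sample_names=['ctrl_rep1', 'treat_rep1'])
    print(data.head())     # raw counts indexed by gene, one column per sample
    print(ercc_df.head())  # ERCC spike-in rows split out separately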
| gpl-3.0 |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue (jeremy@o-donoghue.com) and the Agg backend by John
Hunter (jdhunter@ace.bsd.uchicago.edu)
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license (PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
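# A hedged embedding sketch (not from the original source): as the class
# docstring above notes, the canvas normally lives inside a frame created by
# a FigureManagerWx, but it can also be placed in an ordinary wx.Frame when
# embedding matplotlib in a wxPython application, e.g.:
#
#     app = wx.App(False)
#     frame = wx.Frame(None, -1, 'embedded figure')
#     fig = Figure()
#     fig.add_subplot(111).plot([1, 2, 3], [2, 1, 3])
#     canvas = FigureCanvasWxAgg(frame, -1, fig)
#     frame.Show()
#     app.MainLoop()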
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
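# A hedged usage sketch (not from the original source): user code normally
# selects this backend through matplotlib's generic backend machinery rather
# than importing this module directly. Assuming a working wxPython install:
#
#     import matplotlib
#     matplotlib.use('WXAgg')          # must be called before importing pyplot
#     import matplotlib.pyplot as plt
#     plt.plot([1, 2, 3], [4, 5, 6])
#     plt.show()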
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_replace.py | 15 | 43479 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(TestData):
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
pytest.raises(TypeError, self.tsframe.replace, nan, inplace=True)
pytest.raises(TypeError, self.tsframe.replace, nan)
# mixed type
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexes as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexes as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexes as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
def test_replace_regex_metachar(self):
metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
pytest.raises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
assert result.values.all()
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
pytest.raises(ValueError, df.replace, to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
# dict to scalar
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self):
# GH 15289
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
assert_frame_equal(df, df.replace({}))
assert_frame_equal(df, df.replace(Series([])))
assert_frame_equal(df, df.replace({'b': {}}))
assert_frame_equal(df, df.replace(Series({'b': {}})))
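# A brief illustrative sketch (not from the original test module): the
# regex-based replace API exercised above can be called directly on any
# object-dtype frame, for example:
#
#     import numpy as np
#     import pandas as pd
#     df = pd.DataFrame({'a': ['1', '.', '2', ' . ']})
#     cleaned = df.replace(r'\s*\.\s*', np.nan, regex=True)
#     # cells containing only a dot (with optional whitespace) become NaN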
| apache-2.0 |
wangmiao1981/spark | python/pyspark/pandas/tests/test_stats.py | 6 | 18881 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
functions = ["max", "min", "mean", "sum", "count"]
for funcname in functions:
self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())
functions = ["std", "var", "product", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(),
getattr(pdf_or_pser, funcname)(),
check_exact=False,
)
functions = ["std", "var", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(ddof=0),
getattr(pdf_or_pser, funcname)(ddof=0),
check_exact=False,
)
# NOTE: To test skew, kurt, and median, just make sure they run.
# The numbers are different in spark and pandas.
functions = ["skew", "kurt", "median"]
for funcname in functions:
getattr(psdf_or_psser, funcname)()
def test_stat_functions(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
# empty
self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])
def test_stat_functions_multiindex_column(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
def test_stat_functions_with_no_numeric_columns(self):
pdf = pd.DataFrame(
{
"A": ["a", None, "c", "d", None, "f", "g"],
"B": ["A", "B", "C", None, "E", "F", None],
}
)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf, psdf)
def test_sum(self):
pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))
self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))
def test_product(self):
pdf = pd.DataFrame(
{"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))
self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
self.assert_eq(
psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
)
self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
self.assert_eq(
psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
)
def test_abs(self):
pdf = pd.DataFrame(
{
"A": [1, -2, np.nan, -4, 5],
"B": [1.0, -2, np.nan, -4, 5],
"C": [-6.0, -7, -8, np.nan, 10],
"D": ["a", "b", "c", "d", np.nan],
"E": [True, np.nan, False, True, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.A.abs(), pdf.A.abs())
self.assert_eq(psdf.B.abs(), pdf.B.abs())
self.assert_eq(psdf.E.abs(), pdf.E.abs())
# pandas' bug?
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.abs()
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.D.abs()
def test_axis_on_dataframe(self):
# The row count is intentionally large because small data takes a shortcut:
# anything below 'compute.shortcut_limit' is computed directly on a
# collected pandas dataframe instead of on Spark.
# Here we set 'compute.shortcut_limit' to 1000 explicitly.
with option_context("compute.shortcut_limit", 1000):
pdf = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5] * 300,
"B": [1.0, -2, 3, -4, 5] * 300,
"C": [-6.0, -7, -8, -9, 10] * 300,
"D": [True, False, True, False, False] * 300,
},
index=range(10, 15001, 10),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
self.assert_eq(
psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
)
self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
self.assert_eq(
psdf.var(axis=1, ddof=0, numeric_only=True),
pdf.var(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
self.assert_eq(
psdf.std(axis=1, ddof=0, numeric_only=True),
pdf.std(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(
psdf.max(axis=1, numeric_only=True),
pdf.max(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.min(axis=1, numeric_only=True),
pdf.min(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.sum(axis=1, numeric_only=True),
pdf.sum(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.product(axis=1, numeric_only=True),
pdf.product(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
)
self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
self.assert_eq(
psdf.sem(axis=1, ddof=0, numeric_only=True),
pdf.sem(axis=1, ddof=0, numeric_only=True),
)
def test_corr(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# DataFrame
# we do not handle NaNs for now
pdf = makeMissingDataframe(0.3, 42).fillna(0)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_a = pdf.A
pser_b = pdf.B
psser_a = psdf.A
psser_b = psdf.B
self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
self.assertRaises(TypeError, lambda: psser_a.corr(psdf))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_xa = pdf[("X", "A")]
pser_xb = pdf[("X", "B")]
psser_xa = psdf[("X", "A")]
psser_xb = psdf[("X", "B")]
self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)
def test_cov_corr_meta(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
pdf = pd.DataFrame(
{
"a": np.array([1, 2, 3], dtype="i1"),
"b": np.array([1, 2, 3], dtype="i2"),
"c": np.array([1, 2, 3], dtype="i4"),
"d": np.array([1, 2, 3]),
"e": np.array([1.0, 2.0, 3.0], dtype="f4"),
"f": np.array([1.0, 2.0, 3.0]),
"g": np.array([True, False, True]),
"h": np.array(list("abc")),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr())
def test_stats_on_boolean_dataframe(self):
pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.min(), pdf.min())
self.assert_eq(psdf.max(), pdf.max())
self.assert_eq(psdf.count(), pdf.count())
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.product(), pdf.product())
self.assert_eq(psdf.mean(), pdf.mean())
self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)
def test_stats_on_boolean_series(self):
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(psser.min(), pser.min())
self.assert_eq(psser.max(), pser.max())
self.assert_eq(psser.count(), pser.count())
self.assert_eq(psser.sum(), pser.sum())
self.assert_eq(psser.product(), pser.product())
self.assert_eq(psser.mean(), pser.mean())
self.assert_eq(psser.var(), pser.var(), almost=True)
self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
self.assert_eq(psser.std(), pser.std(), almost=True)
self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
self.assert_eq(psser.sem(), pser.sem(), almost=True)
self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)
def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
)
self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
)
self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))
self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.var(ddof=0, numeric_only=True),
pdf.var(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.std(ddof=0, numeric_only=True),
pdf.std(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.sem(ddof=0, numeric_only=True),
pdf.sem(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))
# Boolean was excluded because of a behavior change in NumPy
# https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
# but this behavior is inconsistent in pandas context.
# Boolean column in quantile tests are excluded for now.
# TODO(SPARK-35555): track and match the behavior of quantile to pandas'
pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
len(psdf.quantile(q=0.5, numeric_only=True)),
len(pdf.quantile(q=0.5, numeric_only=True)),
)
self.assert_eq(
len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
)
def test_numeric_only_unsupported(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
)
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False),
pdf[["i", "b"]].sum(numeric_only=False).astype(int),
)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.sum(numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.s.sum()
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_stats import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
pdamodaran/yellowbrick | yellowbrick/text/dispersion.py | 1 | 10916 | # yellowbrick.text.dispersion
# Implementations of lexical dispersions for text visualization.
#
# Author: Larry Gray
# Created: 2018-06-21 10:06
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dispersion.py [] lwgray@gmail.com $
"""
Implementation of lexical dispersion for text visualization
"""
##########################################################################
## Imports
##########################################################################
from collections import defaultdict
import itertools
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
import numpy as np
##########################################################################
## Dispersion Plot Visualizer
##########################################################################
class DispersionPlot(TextVisualizer):
"""
DispersionPlotVisualizer allows for visualization of the lexical dispersion
of words in a corpus. Lexical dispersion is a measure of a word's
    homogeneity across the parts of a corpus. This plot notes the occurrences
    of a word and how many words from the beginning of the corpus it appears.
Parameters
----------
target_words : list
A list of target words whose dispersion across a corpus passed at fit
will be visualized.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
ignore_case : boolean, default: False
Specify whether input will be case-sensitive.
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
kwargs : dict
Pass any additional keyword arguments to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
def __init__(self, target_words, ax=None, colors=None, ignore_case=False,
annotate_docs=False, labels=None, colormap=None, **kwargs):
super(DispersionPlot, self).__init__(ax=ax, **kwargs)
self.labels = labels
self.colors = colors
self.colormap = colormap
self.target_words = target_words
self.ignore_case = ignore_case
self.annotate_docs = annotate_docs
def _compute_dispersion(self, text, y):
self.boundaries_ = []
offset = 0
if y is None:
y = itertools.repeat(None)
for doc, target in zip(text, y):
for word in doc:
if self.ignore_case:
word = word.lower()
# NOTE: this will find all indices if duplicate words are supplied
                # In the case that the word is not in the target words, an
                # empty list is returned and no data will be yielded
offset += 1
for y_coord in (self.indexed_words_ == word).nonzero()[0]:
y_coord = int(y_coord)
yield (offset, y_coord, target)
if self.annotate_docs:
self.boundaries_.append(offset)
self.boundaries_ = np.array(self.boundaries_, dtype=int)
def _check_missing_words(self, points):
for index in range(len(self.indexed_words_)):
if index in points[:,1]:
pass
else:
raise YellowbrickValueError((
"The indexed word '{}' is not found in "
"this corpus"
).format(self.indexed_words_[index]))
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the dispersion
visualization.
Parameters
----------
X : list or generator
Should be provided as a list of documents or a generator
that yields a list of documents that contain a list of
words in the order they appear in the document.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
if y is not None:
self.classes_ = np.unique(y)
elif y is None and self.labels is not None:
self.classes_ = np.array([self.labels[0]])
else:
self.classes_ = np.array([self.NULL_CLASS])
# Create an index (e.g. the y position) for the target words
self.indexed_words_ = np.flip(self.target_words, axis=0)
if self.ignore_case:
self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
# Stack is used to create a 2D array from the generator
try:
points_target = np.stack(self._compute_dispersion(X, y))
except ValueError:
raise YellowbrickValueError((
"No indexed words were found in the corpus"
))
points = np.stack(zip(points_target[:,0].astype(int),
points_target[:,1].astype(int)))
self.target = points_target[:,2]
self._check_missing_words(points)
self.draw(points, self.target)
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Resolve the labels with the classes
labels = self.labels if self.labels is not None else self.classes_
if len(labels) != len(self.classes_):
raise YellowbrickValueError((
"number of supplied labels ({}) does not "
"match the number of classes ({})"
).format(len(labels), len(self.classes_)))
# Create the color mapping for the labels.
color_values = resolve_colors(
n_colors=len(labels), colormap=self.colormap, colors=self.color)
colors = dict(zip(labels, color_values))
# Transform labels into a map of class to label
labels = dict(zip(self.classes_, labels))
# Define boundaries with a vertical line
if self.annotate_docs:
for xcoords in self.boundaries_:
self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed')
series = defaultdict(lambda: {'x':[], 'y':[]})
if target is not None:
for point, t in zip(points, target):
label = labels[t]
series[label]['x'].append(point[0])
series[label]['y'].append(point[1])
else:
label = self.classes_[0]
for x, y in points:
series[label]['x'].append(x)
series[label]['y'].append(y)
for label, points in series.items():
self.ax.scatter(points['x'], points['y'], marker='|',
c=colors[label], zorder=100, label=label)
self.ax.set_yticks(list(range(len(self.indexed_words_))))
self.ax.set_yticklabels(self.indexed_words_)
def finalize(self, **kwargs):
"""
The finalize method executes any subclass-specific axes
finalization steps. The user calls poof & poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
self.ax.set_ylim(-1, len(self.indexed_words_))
self.ax.set_title("Lexical Dispersion Plot")
self.ax.set_xlabel("Word Offset")
self.ax.grid(False)
# Add the legend outside of the figure box.
if not all(self.classes_ == np.array([self.NULL_CLASS])):
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##########################################################################
## Quick Method
##########################################################################
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None,
labels=None, annotate_docs=False, ignore_case=False, **kwargs):
""" Displays lexical dispersion plot for words in a corpus
    This helper function is a quick wrapper to utilize the DispersionPlot
Visualizer for one-off analysis
Parameters
----------
words : list
A list of words whose dispersion will be examined within a corpus
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
corpus : list
Should be provided as a list of documents that contain
a list of words in the order they appear in the document.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
ignore_case : boolean, default: False
Specify whether input will be case-sensitive.
kwargs : dict
Pass any additional keyword arguments to the super class.
Returns
-------
ax: matplotlib axes
Returns the axes that the plot was drawn on
"""
# Instantiate the visualizer
visualizer = DispersionPlot(
words, ax=ax, colors=colors, colormap=colormap,
ignore_case=ignore_case, labels=labels,
annotate_docs=annotate_docs, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(corpus, y, **kwargs)
# Return the axes object on the visualizer
return visualizer.ax
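# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch for the ``dispersion`` quick
# method above. It is illustrative only and is never called in this module;
# the toy corpus and target words are assumptions made for the example.
def _example_dispersion_usage():
    corpus = [
        ["the", "cat", "sat", "on", "the", "mat"],
        ["the", "dog", "chased", "the", "cat"],
    ]
    target_words = ["cat", "dog"]
    # Returns the matplotlib axes the lexical dispersion plot was drawn on
    return dispersion(target_words, corpus, ignore_case=True, annotate_docs=True)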
| apache-2.0 |
vybstat/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
beepee14/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
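# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged sketch of fitting one of the estimators
# exported above. It is illustrative only, is not part of scikit-learn's
# public API (the leading underscore keeps it out of ``import *``), and is
# never called at import time; the tiny dataset is an assumption made for
# the example.
def _example_linear_regression_usage():
    import numpy as np
    X = np.array([[1.0], [2.0], [3.0]])
    y = np.array([2.0, 4.0, 6.0])
    model = LinearRegression().fit(X, y)
    return model.coef_, model.intercept_  # approximately (array([2.0]), 0.0)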
| bsd-3-clause |
ischwabacher/seaborn | seaborn/algorithms.py | 35 | 6889 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
destiny estimate); only works for one-dimensional inputs and cannot
be used `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
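# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch for ``bootstrap`` above. It is
# illustrative only and is never called in this module; the simulated data and
# the 95% percentile interval are assumptions made for the example.
def _example_bootstrap_usage():
    data = np.random.RandomState(0).normal(size=100)
    # 1000 bootstrap resamples of the sample mean, with a fixed seed
    boot_means = bootstrap(data, func=np.mean, n_boot=1000, random_seed=0)
    # A simple percentile confidence interval for the mean
    return np.percentile(boot_means, [2.5, 97.5])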
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
return_dist : bool
if True, return n_vars x n_vars x n_iter
Returns
-------
p_mat : float
        array of probabilities for actual correlation from null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
    # Make p matrix symmetrical with NaNs on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
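# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch for ``randomize_corrmat``
# above. It is illustrative only and is never called in this module; the
# 4-variable, 50-observation toy dataset is an assumption made for the example.
def _example_randomize_corrmat_usage():
    rs = np.random.RandomState(0)
    a = rs.normal(size=(4, 50))  # n_vars x n_obs, as the docstring describes
    # Corrected, two-tailed permutation p values for each pairwise correlation
    return randomize_corrmat(a, tail="both", n_iter=200, random_seed=0)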
| bsd-3-clause |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_JMI.py | 1 | 1528 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import JMI
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx,_,_ = JMI.jmi(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| mit |
Insight-book/data-science-from-scratch | first-edition/code/gradient_descent.py | 53 | 5895 | from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
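# editor's sketch: a hedged example of minimize_stochastic fitting a tiny
# one-parameter model y ~ theta * x. It is illustrative only and is never
# called in this module; the data, target function and gradient below are
# assumptions made for the example.
def _example_minimize_stochastic():
    x = [1, 2, 3, 4, 5]
    y = [2.1, 3.9, 6.2, 8.1, 9.8]  # roughly y = 2 * x
    def squared_error(x_i, y_i, theta):
        return (y_i - theta[0] * x_i) ** 2
    def squared_error_gradient(x_i, y_i, theta):
        return [-2 * x_i * (y_i - theta[0] * x_i)]
    theta_0 = [random.random()]
    return minimize_stochastic(squared_error, squared_error_gradient,
                               x, y, theta_0, alpha_0=0.01)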
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
| unlicense |
remenska/rootpy | rootpy/plotting/contrib/plot_corrcoef_matrix.py | 5 | 12192 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from ...extern.six.moves import range
from ...extern.six import string_types
__all__ = [
'plot_corrcoef_matrix',
'corrcoef',
'cov',
]
def plot_corrcoef_matrix(matrix, names=None,
cmap=None, cmap_text=None,
fontsize=12, grid=False,
axes=None):
"""
This function will draw a lower-triangular correlation matrix
Parameters
----------
matrix : 2-dimensional numpy array/matrix
A correlation coefficient matrix
names : list of strings, optional (default=None)
List of the parameter names corresponding to the rows in ``matrix``.
cmap : matplotlib color map, optional (default=None)
Color map used to color the matrix cells.
cmap_text : matplotlib color map, optional (default=None)
Color map used to color the cell value text. If None, then
all values will be black.
fontsize : int, optional (default=12)
Font size of parameter name and correlation value text.
grid : bool, optional (default=False)
If True, then draw dashed grid lines around the matrix elements.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
Notes
-----
NumPy and matplotlib are required
Examples
--------
>>> matrix = corrcoef(data.T, weights=weights)
>>> plot_corrcoef_matrix(matrix, names)
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
if axes is None:
axes = plt.gca()
matrix = np.asarray(matrix)
if matrix.ndim != 2:
raise ValueError("matrix is not a 2-dimensional array or matrix")
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix is not square")
if names is not None and len(names) != matrix.shape[0]:
raise ValueError("the number of names does not match the number of "
"rows/columns in the matrix")
# mask out the upper triangular matrix
matrix[np.triu_indices(matrix.shape[0])] = np.nan
if isinstance(cmap_text, string_types):
cmap_text = cm.get_cmap(cmap_text, 201)
if cmap is None:
cmap = cm.get_cmap('jet', 201)
elif isinstance(cmap, string_types):
cmap = cm.get_cmap(cmap, 201)
# make NaN pixels white
cmap.set_bad('w')
axes.imshow(matrix, interpolation='nearest',
cmap=cmap, origin='upper',
vmin=-1, vmax=1)
axes.set_frame_on(False)
plt.setp(axes.get_yticklabels(), visible=False)
plt.setp(axes.get_yticklines(), visible=False)
plt.setp(axes.get_xticklabels(), visible=False)
plt.setp(axes.get_xticklines(), visible=False)
if grid:
# draw grid lines
for slot in range(1, matrix.shape[0] - 1):
# vertical
axes.plot((slot - 0.5, slot - 0.5),
(slot - 0.5, matrix.shape[0] - 0.5), 'k:', linewidth=1)
# horizontal
axes.plot((-0.5, slot + 0.5),
(slot + 0.5, slot + 0.5), 'k:', linewidth=1)
if names is not None:
for slot in range(1, matrix.shape[0]):
# diagonal
axes.plot((slot - 0.5, slot + 1.5),
(slot - 0.5, slot - 2.5), 'k:', linewidth=1)
# label cell values
for row, col in zip(*np.tril_indices(matrix.shape[0], k=-1)):
value = matrix[row][col]
if cmap_text is not None:
color = cmap_text((value + 1.) / 2.)
else:
color = 'black'
axes.text(
col, row,
"{0:d}%".format(int(value * 100)),
color=color,
ha='center', va='center',
fontsize=fontsize)
if names is not None:
# write parameter names
for i, name in enumerate(names):
axes.annotate(
name, (i, i),
rotation=45,
ha='left', va='bottom',
transform=axes.transData,
fontsize=fontsize)
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None, repeat_weights=0):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
import numpy as np
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
X = np.array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None), np.newaxis)
else:
axis = 1
tup = (np.newaxis, slice(None))
if y is not None:
y = np.array(y, copy=False, ndmin=2, dtype=float)
X = np.concatenate((X, y), axis)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if weights is not None:
weights = np.array(weights, dtype=float)
weights_sum = weights.sum()
if weights_sum <= 0:
raise ValueError(
"sum of weights is non-positive")
X -= np.average(X, axis=1-axis, weights=weights)[tup]
if repeat_weights:
# each weight represents a number of repetitions of an observation
# the total sample size can be determined in this case and we have
# both an unbiased and biased weighted covariance
fact = weights_sum - ddof
else:
# normalize weights so they sum to unity
weights /= weights_sum
# unbiased weighted covariance is not defined if the weights are
# not integral frequencies (repeat-type)
fact = (1. - np.power(weights, 2).sum())
else:
weights = 1
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
fact = float(N - ddof)
if not rowvar:
return (np.dot(weights * X.T, X.conj()) / fact).squeeze()
else:
return (np.dot(weights * X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None, weights=None,
repeat_weights=0):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
import numpy as np
c = cov(x, y, rowvar, bias, ddof, weights, repeat_weights)
if c.size == 0:
# handle empty arrays
return c
try:
d = np.diag(c)
except ValueError: # scalar covariance
return 1
return c / np.sqrt(np.multiply.outer(d, d))
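# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged sketch tying ``corrcoef`` and
# ``plot_corrcoef_matrix`` together. It is illustrative only and is never
# called in this module; the simulated data, weights and parameter names are
# assumptions made for the example.
def _example_plot_corrcoef_matrix_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.normal(size=(500, 4))           # 500 observations of 4 parameters
    weights = rng.uniform(0.5, 1.5, size=500)  # one weight per observation
    names = ['a', 'b', 'c', 'd']
    # variables as rows, as expected with rowvar=1 (the default)
    matrix = corrcoef(data.T, weights=weights)
    plot_corrcoef_matrix(matrix, names=names)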
| gpl-3.0 |
imaculate/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/utils/examples/matplotlib_figure_mod_toolbar.py | 2 | 1276 | """
Example on how to add widgets the toolbar of a Matplotlib figure using the
QT backend.
No QT application is created, only the toolbar of the native MPL figure is
modified.
"""
from PySide import QtGui, QtCore
import matplotlib
def test():
plot([1,2,3], lw=2)
q = qt4_interface(gcf())
    return q  # WARNING: it's paramount to return the object; otherwise, with
              # no references, Python deletes it and the GUI doesn't respond!
class qt4_interface:
def __init__(self,fig):
self.fig = fig
toolbar = fig.canvas.toolbar
self.line_edit = QtGui.QLineEdit()
toolbar.addWidget(self.line_edit)
self.line_edit.editingFinished.connect(self.do_something)
self.spinbox = QtGui.QDoubleSpinBox()
toolbar.addWidget(self.spinbox)
self.spinbox.valueChanged.connect(self.do_something2)
def do_something(self, *args):
self.fig.axes[0].set_title(self.line_edit.text())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
def do_something2(self, *args):
self.fig.axes[0].set_xlim(0, self.spinbox.value())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
| gpl-2.0 |
zrhans/pythonanywhere | pyscripts/ply_wrose.py | 1 | 1678 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,Pres.Atm.,Pres.Atm._min,Pres.Atm._max,Temp.Int.,Temp.Int._min,Temp.Int._max,CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,SO2,SO2_min,SO2_max,O3,O3_min,O3_max,NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,CO,CO_min,CO_max,MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
x = df.DVE
y = df.VVE
#print(y)
# Defining the data series
trace1 = go.Area(
r = y,#["2015-12-01","2015-12-01 01:00:00","2015-12-01 02:00:00","2015-12-01 03:00:00","2015-12-01 04:00:00","2015-12-01 05:00:00"],
t = x,#[74.73,76.59,76.5,79.03,77.89,81.9,],
name='Vento m/s',
marker=dict(
color='rgb(158,154,200)'
)
)
# Edit the layout
layout = go.Layout(
title='Distribuição da Velocidade do Vento no diagrama Laurel',
font = dict(size=16),
radialaxis=dict(
ticksuffix='m/s'
),
orientation=270
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)
# Plotting the object
py.plot(
fig,
filename='hans/oi_wrose', # name of the file as saved in your plotly account
sharing='public'
) # 'public' | 'private' | 'secret': Learn more: https://plot.ly/python/privacy
| apache-2.0 |
datapythonista/pandas | pandas/core/arrays/sparse/accessor.py | 2 | 11479 | """Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
    Accessor for sparse data in a Series, including conversion from/to
    scipy sparse matrices.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False ).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, data = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
data.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
tmp = np.mean([column.array.density for _, column in self._parent.items()])
return tmp
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
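# ---------------------------------------------------------------------------
# Editor's note: a brief, hedged sketch of the DataFrame ``.sparse`` accessor
# defined above. It is illustrative only, is never called in this module, and
# assumes scipy is installed; the 3x3 identity matrix is just example data.
def _example_sparse_frame_accessor_usage():
    import pandas as pd
    import scipy.sparse

    mat = scipy.sparse.eye(3)
    df = pd.DataFrame.sparse.from_spmatrix(mat)  # each column is a SparseArray
    dense = df.sparse.to_dense()                 # back to dense arrays
    coo = df.sparse.to_coo()                     # scipy COO matrix
    return dense, coo, df.sparse.density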
| bsd-3-clause |
Rocamadour7/ml_tutorial | 05. Clustering/titanic-data-example.py | 1 | 1721 | import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
df = pd.read_excel('titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
columns = df.columns.values
for column in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_contents = df[column].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[column] = list(map(convert_to_int, df[column]))
return df
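# Editor's sketch (never called): a minimal illustration of the mapping that
# handle_non_numerical_data performs on a single text column. The column and
# its values are made up; the integer codes depend on set iteration order.
def _example_handle_non_numerical_data():
    demo = pd.DataFrame({'sex': ['male', 'female', 'male']})
    return demo.pipe(handle_non_numerical_data)  # e.g. 'sex' becomes [0, 1, 0]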
df = handle_non_numerical_data(df)
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = KMeans(n_clusters=2)
clf.fit(X)
correct = 0
for i in range(len(X)):
predict_me = np.array(X[i].astype(float))
predict_me = predict_me.reshape(-1, len(predict_me))
prediction = clf.predict(predict_me)
if prediction[0] == y[i]:
correct += 1
print(correct/len(X))
| mit |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/oneoffs/training_curve.py | 8 | 5964 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used to plot the accuracy of the policy and value networks in
predicting professional game moves and results over the course
of training. Check FLAGS for default values for what models to
load and what sgf files to parse.
Usage:
python training_curve.py
Sample 3 positions from each game
python training_curve.py --num_positions=3
Only grab games after 2005 (default is 2000)
python training_curve.py --min_year=2005
"""
import sys
sys.path.insert(0, '.')
import os.path
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from absl import app, flags
from tqdm import tqdm
import coords
from rl_loop import fsdb
import oneoff_utils
flags.DEFINE_string("sgf_dir", None, "sgf database")
flags.DEFINE_string("plot_dir", "data", "Where to save the plots.")
flags.DEFINE_integer("min_year", "2000",
"Only take sgf games with date >= min_year")
flags.DEFINE_string("komi", "7.5", "Only take sgf games with given komi")
flags.DEFINE_integer("idx_start", 150, "Only take models after given idx")
flags.DEFINE_integer("num_positions", 1,
"How many positions from each game to sample from.")
flags.DEFINE_integer("eval_every", 5,
"Eval every k models to generate the curve")
flags.mark_flag_as_required('sgf_dir')
FLAGS = flags.FLAGS
def batch_run_many(player, positions, batch_size=100):
"""Used to avoid a memory oveflow issue when running the network
on too many positions. TODO: This should be a member function of
player.network?"""
prob_list = []
value_list = []
for idx in range(0, len(positions), batch_size):
probs, values = player.network.run_many(positions[idx:idx + batch_size])
prob_list.append(probs)
value_list.append(values)
return np.concatenate(prob_list, axis=0), np.concatenate(value_list, axis=0)
def eval_player(player, positions, moves, results):
probs, values = batch_run_many(player, positions)
policy_moves = [coords.from_flat(c) for c in np.argmax(probs, axis=1)]
top_move_agree = [moves[idx] == policy_moves[idx]
for idx in range(len(moves))]
square_err = (values - results) ** 2 / 4
return top_move_agree, square_err
def sample_positions_from_games(sgf_files, num_positions=1):
pos_data = []
move_data = []
result_data = []
move_idxs = []
fail_count = 0
for path in tqdm(sgf_files, desc="loading sgfs", unit="games"):
try:
positions, moves, results = oneoff_utils.parse_sgf_to_examples(path)
except KeyboardInterrupt:
raise
except Exception as e:
print("Parse exception:", e)
fail_count += 1
continue
# add entire game
if num_positions == -1:
pos_data.extend(positions)
move_data.extend(moves)
move_idxs.extend(range(len(positions)))
result_data.extend(results)
else:
for idx in np.random.choice(len(positions), num_positions):
pos_data.append(positions[idx])
move_data.append(moves[idx])
result_data.append(results[idx])
move_idxs.append(idx)
print("Sampled {} positions, failed to parse {} files".format(
len(pos_data), fail_count))
return pos_data, move_data, result_data, move_idxs
def get_training_curve_data(
model_dir, pos_data, move_data, result_data, idx_start, eval_every):
model_paths = oneoff_utils.get_model_paths(model_dir)
df = pd.DataFrame()
player = None
print("Evaluating models {}-{}, eval_every={}".format(
idx_start, len(model_paths), eval_every))
for idx in tqdm(range(idx_start, len(model_paths), eval_every)):
if player:
oneoff_utils.restore_params(model_paths[idx], player)
else:
player = oneoff_utils.load_player(model_paths[idx])
correct, squared_errors = eval_player(
player=player, positions=pos_data,
moves=move_data, results=result_data)
avg_acc = np.mean(correct)
avg_mse = np.mean(squared_errors)
print("Model: {}, acc: {:.4f}, mse: {:.4f}".format(
model_paths[idx], avg_acc, avg_mse))
df = df.append({"num": idx, "acc": avg_acc,
"mse": avg_mse}, ignore_index=True)
return df
def save_plots(data_dir, df):
plt.plot(df["num"], df["acc"])
plt.xlabel("Model idx")
plt.ylabel("Accuracy")
plt.title("Accuracy in Predicting Professional Moves")
plot_path = os.path.join(data_dir, "move_acc.pdf")
plt.savefig(plot_path)
plt.figure()
plt.plot(df["num"], df["mse"])
plt.xlabel("Model idx")
plt.ylabel("MSE/4")
plt.title("MSE in predicting outcome")
plot_path = os.path.join(data_dir, "value_mse.pdf")
plt.savefig(plot_path)
def main(unusedargv):
sgf_files = oneoff_utils.find_and_filter_sgf_files(
FLAGS.sgf_dir, FLAGS.min_year, FLAGS.komi)
pos_data, move_data, result_data, move_idxs = sample_positions_from_games(
sgf_files=sgf_files, num_positions=FLAGS.num_positions)
df = get_training_curve_data(fsdb.models_dir(), pos_data, move_data,
result_data, FLAGS.idx_start, FLAGS.eval_every)
save_plots(FLAGS.plot_dir, df)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
asimshankar/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
sanketloke/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
vshtanko/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
kbrose/article-tagging | lib/tagnews/utils/load_data.py | 1 | 18109 | import pandas as pd
import numpy as np
import re
import json
import os
import warnings
import shutil
from pathlib import Path
import codecs
"""
Helper functions to load the article data. The main method to use
is load_data().
"""
# Caution! Modifying this in code will have no effect since the
# default arguments are populated with this reference at creation
# time, so post-hoc modifications will do nothing.
__data_folder = os.path.join(os.path.split(__file__)[0], '..', 'data')
def clean_string(s):
"""
Clean all the HTML/Unicode nastiness out of a string.
Replaces newlines with spaces.
"""
return s.replace('\r', '').replace('\n', ' ').replace('\xa0', ' ').strip()
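# Illustrative behaviour (hypothetical input):
#     clean_string('Some\r\ntext\xa0here ')  ->  'Some text here'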
def load_articles(data_folder=__data_folder, nrows=None):
"""
Loads the articles CSV. Can optionally only load the first
`nrows` number of rows.
"""
column_names = ['id',
'feedname',
'url',
'orig_html',
'title',
'bodytext',
'relevant',
'created',
'last_modified',
'news_source_id',
'author']
return pd.read_csv(os.path.join(data_folder,
'newsarticles_article.csv'),
header=None,
names=column_names,
nrows=nrows,
dtype={'orig_html': str, 'author': str})
def load_taggings(data_folder=__data_folder):
"""Loads the type-of-crime human tagging of the articles."""
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc.set_index('id', drop=True, inplace=True)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
uc_tags = pd.read_csv(
os.path.join(data_folder, 'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names
)
uc_tags.set_index('usercoding_id', drop=True, inplace=True)
uc_tags['article_id'] = uc.loc[uc_tags.index, 'article_id']
return uc_tags
def load_model_categories(data_folder=__data_folder):
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tcr = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcategoryrelevance.csv'),
names=tcr_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tcr['article_id'] = tc.loc[tcr['coding_id']]['article_id'].values
return tcr
def load_model_locations(data_folder=__data_folder):
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tl = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedlocation.csv'),
names=tl_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tl['article_id'] = tc.loc[tl['coding_id']]['article_id'].values
return tl
def load_locations(data_folder=__data_folder):
"""Load the human-extracted locations from the articles."""
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc['locations'] = uc['locations'].apply(lambda x: json.loads(x))
return uc
def load_categories(data_folder=__data_folder):
"""Loads the mapping of id to names/abbrevations of categories"""
column_names = ['id', 'category_name', 'abbreviation', 'created',
'active', 'kind']
return pd.read_csv(os.path.join(data_folder, 'newsarticles_category.csv'),
header=None,
names=column_names)
def load_data(data_folder=__data_folder, nrows=None):
"""
Creates a dataframe of the article information and k-hot encodes the tags
into columns called cat_NUMBER. The k-hot encoding is done assuming that
the categories are 1-indexed and there are as many categories as the
    maximum value of the numerical category_id column.
Inputs:
data_folder:
A folder containing the data files in CSV format.
nrows:
Number of articles to load. Defaults to all, which uses about 4
GB of memory.
"""
df = load_articles(data_folder=data_folder, nrows=nrows)
df['relevant'] = df['relevant'] == 't'
df.rename(columns={'id': 'article_id'}, inplace=True)
df.set_index('article_id', drop=True, inplace=True)
# hopefully this will save some memory/space, can add back if needed
del(df['orig_html'])
tags_df = load_taggings(data_folder)
# will help cacheing
tags_df.sort_values(by='article_id', inplace=True)
tags_df = tags_df.loc[tags_df['article_id'].isin(
df.index.intersection(tags_df['article_id']))]
locs_df = load_locations(data_folder)
locs_df.sort_values(by='article_id', inplace=True)
locs_df = locs_df.loc[locs_df['article_id'].isin(
df.index.intersection(locs_df['article_id']))]
model_tags_df = load_model_categories(data_folder)
# will help cacheing
model_tags_df.sort_values(by='article_id', inplace=True)
model_tags_df = model_tags_df.loc[model_tags_df['article_id'].isin(
df.index.intersection(model_tags_df['article_id']))]
# init with empty lists
df['locations'] = np.empty([df.shape[0], 0]).tolist()
loc_article_ids = locs_df['article_id'].values
df.loc[loc_article_ids, 'locations'] = locs_df['locations'].values
def find_loc_in_string(locs, string):
"""
        The locations are generated from JavaScript, which means there
        are going to be some problems getting things to line up exactly
        and neatly. This function will hopefully perform all necessary
transformations to find the given location text within the
larger string.
Inputs:
locs: list of locations as loaded by load_locations
string: bodytext of article in which to find locs
Returns:
updated_locs: list of locations as loaded by
load_locations, but with a couple
extra fields ('cleaned text' and 'cleaned span').
"""
for i, loc in enumerate(locs):
loc_txt = loc['text']
loc_txt = clean_string(loc_txt)
string = clean_string(string)
loc['cleaned text'] = loc_txt
spans = [x.span() for x in re.finditer(re.escape(loc_txt), string)]
if spans:
# The string may have occurred multiple times, and since the
# spans don't line up perfectly we can't know which one is the
# "correct" one. Best we can do is find the python span closest
# to the expected javascript span.
closest = np.argmin(np.abs(
np.array([x[0] for x in spans]) - loc['start']
))
loc['cleaned span'] = spans[closest]
locs[i] = loc
return locs
df['locations'] = df.apply(
lambda r: find_loc_in_string(r['locations'], r['bodytext']),
axis=1
)
num_no_match = df['locations'].apply(
lambda locs: any([('cleaned span' not in loc) for loc in locs])
).sum()
if num_no_match:
warnings.warn(('{} location strings were not found in'
' the bodytext.').format(num_no_match),
RuntimeWarning)
model_locations_df = load_model_locations(data_folder)
model_locations_df = model_locations_df.set_index('article_id')
model_locations_gb = model_locations_df.groupby('article_id')
model_locations_text = model_locations_gb['text'].apply(list)
df['model_location_text'] = model_locations_text
categories_df = load_categories(data_folder)
categories_df.set_index('id', drop=True, inplace=True)
# tags_df['category_id'] = tags_df['category_id'].astype(str)
tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[tags_df['category_id']]
.values)
model_tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[model_tags_df['category_id']]
.values)
if np.setdiff1d(tags_df['article_id'].values, df.index.values).size:
warnings.warn('Tags were found for article IDs that do not exist.',
RuntimeWarning)
def update_df_with_categories(article_ids, cat_abbreviations, vals,
is_model):
# for some reason, some articles that are tagged don't show up
# in the articles CSV. filter those out.
existing_ids_filter = np.isin(article_ids, df.index.values)
article_ids = article_ids[existing_ids_filter]
cat_abbreviations = cat_abbreviations[existing_ids_filter]
vals = vals[existing_ids_filter]
for i in range(categories_df.shape[0]):
cat_name = categories_df.loc[i+1, 'abbreviation']
if is_model:
cat_name += '_model'
df[cat_name] = 0
if not is_model:
df[cat_name] = df[cat_name].astype('int8')
matches = cat_abbreviations == cat_name
if not matches.sum():
continue
df.loc[article_ids[matches], cat_name] = vals[matches]
update_df_with_categories(
model_tags_df['article_id'].values,
model_tags_df['category_abbreviation'].values + '_model',
model_tags_df['relevance'].values,
is_model=True
)
update_df_with_categories(
tags_df['article_id'].values,
tags_df['category_abbreviation'].values,
np.ones((tags_df['article_id'].values.shape), dtype='int8'),
is_model=False
)
df.loc[df['bodytext'].isnull(), 'bodytext'] = ''
return df
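# Illustrative usage sketch (assumes the newsarticles_*.csv exports are present
# in the default data folder; `nrows` keeps memory use down):
#     df = load_data(nrows=10000)
#     df['relevant'].mean()              # fraction of articles tagged relevant
#     df.filter(regex='_model$').head()  # model-assigned category columns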
def subsample_and_resave(out_folder, n=5, input_folder=__data_folder,
random_seed=5):
"""
Subsamples the CSV data files so that we have at least
`n` articles from each type-of-crime tag as determined
by the human coding. Saves the subsampled CSV data
into `out_folder`. If there are fewer than `n` articles
tagged with a type-of-crime, then we will use all of
the articles with that tag.
Inputs
------
out_folder : str
Path to folder where data should be saved. Should already exist.
n : int
How many examples from each category should we have?
input_folder : str
Path to where the full CSV files are saved.
random_seed : None or int
np.random.RandomState() will be seeded with this value
in order to perform the random subsampling.
"""
out_folder = str(Path(out_folder).expanduser().absolute())
input_folder = str(Path(input_folder).expanduser().absolute())
if out_folder == input_folder:
raise RuntimeError('out_folder cannot match input_folder.')
random_state = np.random.RandomState(random_seed)
df = load_data(input_folder)
chosen_indexes = []
for crime_type in df.loc[:, 'OEMC':].columns:
is_type = df[crime_type].astype(bool)
n_samps = min(n, is_type.sum())
chosen_indexes += (df.loc[is_type, :]
.sample(n_samps, random_state=random_state)
.index
.tolist())
del df
chosen_indexes = sorted(list(set(chosen_indexes)))
# newsarticles_article.csv
articles_df = load_articles(input_folder)
sample = (articles_df
.reset_index()
.set_index('id')
.loc[chosen_indexes, 'index'])
articles_df = articles_df.loc[sample, :]
    # obfuscate the article text with a ROT-13 transform
articles_df['bodytext'] = articles_df['bodytext'].astype(str).apply(
lambda x: codecs.encode(x, 'rot-13')
)
articles_df.to_csv(os.path.join(out_folder, 'newsarticles_article.csv'),
header=None, index=False)
del articles_df
# newsarticles_category.csv
shutil.copyfile(os.path.join(input_folder, 'newsarticles_category.csv'),
os.path.join(out_folder, 'newsarticles_category.csv'))
# newsarticles_usercoding.csv
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc_df = pd.read_csv(os.path.join(input_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
sample = np.where(uc_df['article_id'].isin(chosen_indexes))[0]
uc_df.loc[sample, :].to_csv(
os.path.join(out_folder, 'newsarticles_usercoding.csv'),
header=None, index=False
)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
# newsarticles_usercoding_categories.csv
uc_tags_df = pd.read_csv(
os.path.join(input_folder,
'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names,
dtype={'id': int, 'usercoding_id': int, 'category_id': int}
)
sample = np.where(uc_df
.set_index('id')
.loc[uc_tags_df['usercoding_id'], 'article_id']
.isin(chosen_indexes)
)[0]
uc_tags_df = uc_tags_df.loc[sample, :]
uc_tags_df.to_csv(
os.path.join(out_folder, 'newsarticles_usercoding_categories.csv'),
header=None, index=False
)
# newsarticles_trainedcoding
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tc = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
)
tc = tc.loc[tc['article_id'].isin(chosen_indexes)]
tc.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcoding.csv'),
header=False, index=False
)
# newsarticles_trainedcategoryrelevance
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tcr = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcategoryrelevance.csv'),
names=tcr_names
)
tcr = tcr.loc[tcr['coding_id'].isin(tc['id'])]
tcr.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcategoryrelevance.csv'),
header=False, index=False
)
# newsarticles_trainedlocation
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
tl = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedlocation.csv'),
names=tl_names
)
tl = tl.loc[tl['coding_id'].isin(tc['id'])]
tl.to_csv(
os.path.join(out_folder, 'newsarticles_trainedlocation.csv'),
header=False, index=False
)
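# Illustrative call (paths are hypothetical; `out_folder` must already exist and
# must differ from `input_folder`):
#     subsample_and_resave('subsampled/', n=5, input_folder='tagnews/data/')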
def load_crime_data(data_folder=__data_folder):
crimes = pd.read_csv(os.path.join(data_folder, 'Crimes.csv'))
crimes = crimes[crimes['Year'] > 2010]
crime_string = pd.Series('', crimes.index)
# ['ID', 'Case Number', 'Date', 'Block', 'IUCR', 'Primary Type',
# 'Description', 'Location Description', 'Arrest', 'Domestic', 'Beat',
# 'District', 'Ward', 'Community Area', 'FBI Code', 'X Coordinate',
# 'Y Coordinate', 'Year', 'Updated On', 'Latitude', 'Longitude',
# 'Location']
# TODO: synonyms on this for month name, weekday name,
# time of day (e.g. afternoon), etc.
crime_string += crimes['Date'] + ' '
# TODO: synonyms?
crime_string += crimes['Primary Type'] + ' '
# TODO: synonyms?
crime_string += crimes['Description'] + ' '
# TODO: synonyms?
crime_string += crimes['Location Description'] + ' '
# TODO: synonyms?
iucr = pd.read_csv(os.path.join(data_folder, 'IUCR.csv'))
iucr.set_index('IUCR', drop=True, inplace=True)
idx = iucr.index
idx_values = idx.values
idx_values[idx.str.len() == 3] = '0' + idx_values[idx.str.len() == 3]
crime_string += (iucr.loc[crimes['IUCR'], 'PRIMARY DESCRIPTION']
.fillna('')
.values
+ ' ')
crime_string += (iucr.loc[crimes['IUCR'], 'SECONDARY DESCRIPTION']
.fillna('')
.values
+ ' ')
community_areas = pd.read_csv(os.path.join(data_folder, 'CommAreas.csv'))
community_areas.set_index('AREA_NUM_1', inplace=True, drop=True)
crime_string += (community_areas.loc[crimes['Community Area'], 'COMMUNITY']
.fillna('')
.values
+ ' ')
return crimes, crime_string
def load_ner_data(data_folder=__data_folder):
"""
Loads ner.csv from the specified data folder.
The column 'stag' is a binary value indicating whether or not
the row corresponds to the entity "geo". Typically, you will
want to use column 'word' to predict the column 'stag'.
"""
df = pd.read_csv(os.path.join(data_folder, 'ner.csv'),
encoding="ISO-8859-1",
error_bad_lines=False,
index_col=0)
df.dropna(subset=['word', 'tag'], inplace=True)
df.reset_index(inplace=True, drop=True)
df['stag'] = (df['tag'] == 'B-geo') | (df['tag'] == 'I-geo')
df['all_tags'] = df['tag']
df['tag'] = df['stag']
df = df[['word', 'all_tags', 'tag']]
return df
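# Illustrative usage sketch (assumes ner.csv ships in the default data folder):
#     ner = load_ner_data()
#     ner['tag'].mean()   # fraction of tokens labelled as geo entities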
| mit |
Fireblend/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
xebitstudios/Kayak | examples/poisson_glm.py | 3 | 1224 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import kayak
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500
# Random inputs.
X = npr.randn(N,D)
true_W = npr.randn(D,P)
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)
kyk_batcher = kayak.Batcher(batch_size, N)
# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)
# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)
# Weights.
W = 0.01*npr.randn(D,P)
kyk_W = kayak.Parameter(W)
# Linear layer.
kyk_activation = kayak.MatMult( kyk_inputs, kyk_W)
# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)
# Poisson negative log likelihood.
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets
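# (Up to the additive log(Y!) constant, the Poisson negative log-likelihood is
#  lam - Y*log(lam), which is what the expression above builds element-wise.)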
# Sum the losses.
kyk_loss = kayak.MatSum( kyk_nll )
for ii in xrange(100):
for batch in kyk_batcher:
loss = kyk_loss.value
print loss, np.sum((kyk_W.value - true_W)**2)
grad = kyk_loss.grad(kyk_W)
kyk_W.value -= learn * grad
# Plot the true and inferred rate for a subset of data.
T_slice = slice(0,100)
kyk_inputs.value = X[T_slice,:]
plt.figure()
plt.plot(lam[T_slice], 'k')
plt.plot(kyk_lam.value, '--r')
plt.show() | mit |
JanNash/sms-tools | lectures/06-Harmonic-model/plots-code/spectral-peaks.py | 22 | 1161 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis, pX,'c', lw=1.5)
plt.axis([0,7000, min(pX),10])
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
DamCB/tyssue | tyssue/draw/ipv_draw.py | 2 | 8114 | """3D visualisation inside the notebook.
"""
import warnings
import numpy as np
import pandas as pd
from matplotlib import cm
from ipywidgets import interact
from ..config.draw import sheet_spec
from ..utils.utils import spec_updater, get_sub_eptm
try:
import ipyvolume as ipv
except ImportError:
print(
"""
This module needs ipyvolume to work.
You can install it with:
$ conda install -c conda-forge ipyvolume
"""
)
def browse_history(history, coords=["x", "y", "z"], **draw_specs_kw):
times = history.time_stamps
num_frames = times.size
draw_specs = sheet_spec()
spec_updater(draw_specs, draw_specs_kw)
sheet = history.retrieve(0)
ipv.clear()
fig, meshes = sheet_view(sheet, coords, **draw_specs_kw)
lim_inf = sheet.vert_df[sheet.coords].min().min()
lim_sup = sheet.vert_df[sheet.coords].max().max()
ipv.xyzlim(lim_inf, lim_sup)
def set_frame(i=0):
fig.animation = 0
t = times[i]
meshes = _get_meshes(history.retrieve(t), coords, draw_specs)
update_view(fig, meshes)
ipv.show()
interact(set_frame, i=(0, num_frames - 1))
def update_view(fig, meshes):
for old, new in zip(fig.meshes, meshes):
old.x = new.x
old.y = new.y
old.z = new.z
old.color = new.color
old.triangles = new.triangles
old.lines = new.lines
def sheet_view(sheet, coords=["x", "y", "z"], **draw_specs_kw):
"""
    Creates a javascript renderer of the edge lines and face meshes to be
    displayed in Jupyter Notebooks
Returns
-------
fig: a :class:`ipyvolume.widgets.Figure` widget
mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget
"""
# ipv.style.use(["dark", "minimal"])
draw_specs = sheet_spec()
spec_updater(draw_specs, draw_specs_kw)
fig = ipv.gcf()
fig.meshes = fig.meshes + _get_meshes(sheet, coords, draw_specs)
box_size = max(*(np.ptp(sheet.vert_df[u]) for u in sheet.coords))
border = 0.05 * box_size
lim_inf = sheet.vert_df[sheet.coords].min().min() - border
lim_sup = sheet.vert_df[sheet.coords].max().max() + border
ipv.xyzlim(lim_inf, lim_sup)
return fig, fig.meshes
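# Illustrative usage sketch (assumes `sheet` is a tyssue Sheet with x, y, z
# vertex coordinates; the spec keys mirror config.draw.sheet_spec()):
#     fig, meshes = sheet_view(sheet, edge={"color": "black"},
#                              face={"visible": True, "color": "blue"})
#     ipv.show()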
def view_ipv(sheet, coords=["x", "y", "z"], **edge_specs):
"""
Creates a javascript renderer of the edge lines to be displayed
in Jupyter Notebooks
Returns
-------
fig: a :class:`ipyvolume.widgets.Figure` widget
mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget
"""
warnings.warn("`view_ipv` is deprecated, use the more generic `sheet_view`")
mesh = edge_mesh(sheet, coords, **edge_specs)
fig = ipv.gcf()
fig.meshes = fig.meshes + [mesh]
box_size = max(*(np.ptp(sheet.vert_df[u]) for u in sheet.coords))
border = 0.05 * box_size
lim_inf = sheet.vert_df[sheet.coords].min().min() - border
lim_sup = sheet.vert_df[sheet.coords].max().max() + border
ipv.xyzlim(lim_inf, lim_sup)
return fig, mesh
def edge_mesh(sheet, coords, **edge_specs):
"""
Creates a ipyvolume Mesh of the edge lines to be displayed
in Jupyter Notebooks
Returns
-------
mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget
"""
spec = sheet_spec()["edge"]
spec.update(**edge_specs)
if callable(spec["color"]):
spec["color"] = spec["color"](sheet)
if isinstance(spec["color"], str):
color = spec["color"]
elif hasattr(spec["color"], "__len__"):
color = _wire_color_from_sequence(spec, sheet)[:, :3]
u, v, w = coords
mesh = ipv.Mesh(
x=sheet.vert_df[u],
y=sheet.vert_df[v],
z=sheet.vert_df[w],
lines=sheet.edge_df[["srce", "trgt"]].astype(dtype=np.uint32),
color=color,
)
return mesh
def face_mesh(sheet, coords, **face_draw_specs):
"""
Creates a ipyvolume Mesh of the face polygons
"""
Ne, Nf = sheet.Ne, sheet.Nf
if callable(face_draw_specs["color"]):
face_draw_specs["color"] = face_draw_specs["color"](sheet)
if isinstance(face_draw_specs["color"], str):
color = face_draw_specs["color"]
elif hasattr(face_draw_specs["color"], "__len__"):
color = _face_color_from_sequence(face_draw_specs, sheet)[:, :3]
if "visible" in sheet.face_df.columns:
edges = sheet.edge_df[sheet.upcast_face(sheet.face_df["visible"])].index
_sheet = get_sub_eptm(sheet, edges)
if _sheet is not None:
sheet = _sheet
if isinstance(color, np.ndarray):
faces = sheet.face_df["face_o"].values.astype(np.uint32)
edges = edges.values.astype(np.uint32)
indexer = np.concatenate([faces, edges + Nf, edges + Ne + Nf])
color = color.take(indexer, axis=0)
epsilon = face_draw_specs.get("epsilon", 0)
up_srce = sheet.edge_df[["s" + c for c in coords]]
up_trgt = sheet.edge_df[["t" + c for c in coords]]
Ne, Nf = sheet.Ne, sheet.Nf
if epsilon > 0:
up_face = sheet.edge_df[["f" + c for c in coords]].values
up_srce = (up_srce - up_face) * (1 - epsilon) + up_face
up_trgt = (up_trgt - up_face) * (1 - epsilon) + up_face
mesh_ = np.concatenate(
[sheet.face_df[coords].values, up_srce.values, up_trgt.values]
)
triangles = np.vstack(
[sheet.edge_df["face"], np.arange(Ne) + Nf, np.arange(Ne) + Ne + Nf]
).T.astype(dtype=np.uint32)
mesh = ipv.Mesh(
x=mesh_[:, 0], y=mesh_[:, 1], z=mesh_[:, 2], triangles=triangles, color=color
)
return mesh
def _wire_color_from_sequence(edge_spec, sheet):
"""
"""
color_ = edge_spec["color"]
cmap = cm.get_cmap(edge_spec.get("colormap", "viridis"))
if color_.shape in [(sheet.Nv, 3), (sheet.Nv, 4)]:
return np.asarray(color_)
if color_.shape == (sheet.Nv,):
if np.ptp(color_) < 1e-10:
return np.ones((sheet.Nv, 3)) * 0.7
return cmap((color_ - color_.min()) / np.ptp(color_))
if color_.shape in [(sheet.Ne, 3), (sheet.Ne, 4)]:
color_ = pd.DataFrame(color_, index=sheet.edge_df.index)
color_["srce"] = sheet.edge_df["srce"]
color_ = color_.groupby("srce").mean().values
return color_
if color_.shape == (sheet.Ne,):
color_ = pd.DataFrame(color_, index=sheet.edge_df.index)
color_["srce"] = sheet.edge_df["srce"]
color_ = color_.groupby("srce").mean().values.ravel()
if np.ptp(color_) < 1e-10:
warnings.warn("Attempting to draw a colormap " "with a uniform value")
return np.ones((sheet.Nv, 3)) * 0.7
return cmap((color_ - color_.min()) / np.ptp(color_))
else:
raise ValueError("The 'color' value of the spec doesn't have a correct shape.")
def _face_color_from_sequence(face_spec, sheet):
color_ = face_spec["color"]
cmap = cm.get_cmap(face_spec.get("colormap", "viridis"))
Nf, Ne = sheet.Nf, sheet.Ne
color_min, color_max = face_spec.get("color_range", (color_.min(), color_.max()))
face_mesh_shape = Nf + 2 * Ne
if color_.shape in [(sheet.Nf, 3), (sheet.Nf, 4)]:
return np.concatenate([color_, color_, color_])
elif color_.shape == (sheet.Nf,):
if np.ptp(color_) < 1e-10:
# warnings.warn("Attempting to draw a colormap with a uniform value")
return np.ones((face_mesh_shape, 3)) * 0.5
normed = (color_ - color_min) / (color_max - color_min)
up_color = sheet.upcast_face(normed).values
return cmap(np.concatenate([normed, up_color, up_color]))
else:
raise ValueError(
"shape of `face_spec['color']` must be either (Nf, 3), (Nf, 4) or (Nf,)"
)
def _get_meshes(sheet, coords, draw_specs):
meshes = []
edge_spec = draw_specs["edge"]
if edge_spec["visible"]:
edges = edge_mesh(sheet, coords, **edge_spec)
meshes.append(edges)
else:
edges = None
face_spec = draw_specs["face"]
if face_spec["visible"]:
faces = face_mesh(sheet, coords, **face_spec)
meshes.append(faces)
else:
faces = None
return meshes
| gpl-3.0 |
pglomski/shopnotes | drill_speed_chart.py | 1 | 2778 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
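# using RPM = (12 * SFM) / (pi * d), where d is the bit diameter in inches and
# SFM is the material's surface speed from the table above; e.g. aluminum at
# 300 SFM with a 1/4 in. bit gives 12*300/(pi*0.25) ~= 4584 rpm.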
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
| agpl-3.0 |
BillyLiggins/fitting | first.py | 1 | 7031 | import copy
import echidna
import echidna.output.plot as plot
import echidna.core.spectra as spectra
from echidna.output import store
import matplotlib.pyplot as plt
import argparse
import glob
import numpy as np
import os
def convertor(path):
flist=np.array(glob.glob(path))
for ntuple in flist:
os.system("python ~/echidna/echidna/scripts/dump_spectra_ntuple.py -c ~/workspace/PhD/fitting/config.yml -f "+ str(ntuple)+" -s hdf5/")
def combinerNtuple(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.fill_from_ntuple(hdf5)
first = False
else:
spectrum2 = store.fill_from_ntuple(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
def combiner(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.load(hdf5)
first = False
else:
spectrum2 = store.load(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
"""The way you should do it is to define a lot of spectra and then plot them.
You don't really know how to normalise the histogram or indeed whether that is of any use in the first
place.
"""
def slicer(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 0.6,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name=str(i*1000)+"mm to "+str((i+1)*1000)+"mm"
print type(spec2)
filler.append(spec2)
def slicerMC(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_mc_low": 0.,
"energy_mc_high": 1,
"radial_mc_low": i*6000.0/nslice,
"radial_mc_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="MC"
print type(spec2)
print "This gives the number os events in each window:"
print "mc : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def slicerReco(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 1.,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="Reco"
print type(spec2)
print "This gives the number os events in each window:"
print "reco : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def signalPlotter(spectra,dim,name):
i=0
for spec in spectra:
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel=str(dim)+" [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project(dim),histtype="stepfilled", color="RoyalBlue",label=spec._name)
fig.savefig("slice_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
i=1+i
def combiPlotter(spectra,dim,name):
i=0
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
for spec in spectra:
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in 1000mm slices",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="energy_reco"+ " [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project("energy_reco"),label=spec._name,histtype='step')
ax.set_ylim([0,0.03])
ax.set_xlim([0.2,0.7])
ax.legend(loc="best")
fig.savefig("combined_"+str(name)+".png")
def func(path,nslice,name):
spectra=[]
slicer(path,spectra,nslice)
signalPlotter(spectra,"energy_reco",name)
combiPlotter(spectra,"energy_reco",name)
def po210():
convertor("po210_ntuple/*")
combiner("hdf5/SolarPo**ntuple*","hdf5/SolarPo210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarPo210_combined.hdf5",6,"po210")
def bi210():
convertor("bi210_ntuple/*")
combiner("hdf5/SolarBi**ntuple*","hdf5/SolarBi210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarBi210_combined.hdf5",6,"bi210")
def compair(spectrumPathReco,spectrumPathMC,name):
spectraReco=[]
spectraMC=[]
slicerReco(spectrumPathReco,spectraReco,6)
slicerMC(spectrumPathMC,spectraMC,6)
for i in range(0,len(spectraReco)):
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spectraReco[i].get_config().get_par("energy_reco")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="Energy [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spectraReco[i].project("energy_reco"),histtype="stepfilled",label=spectraReco[i]._name)
par = spectraMC[i].get_config().get_par("energy_mc")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.hist(x,bins,weights=spectraMC[i].project("energy_mc"),histtype="stepfilled",label=spectraMC[i]._name,alpha=0.75)
ax.legend(loc=2)
fig.savefig("compare_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
if __name__=="__main__":
print "You need to compare the recon against the mc"
print "You should bin in bigger bins becuase you could then bin in 4d"
"""You need to plot the standard spectra"""
| mit |
RachitKansal/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
    # Test a random point (not in the dataset)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when `radius` is set to the mean
    # distance from the query point to the other points in the dataset.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
    # Each query can return a different number of neighbors, so dists and inds
    # cannot be regular 2D arrays; hence the object dtype, holding one
    # variable-length array per query.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
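    # Reminder: the cosine distance is 1 - cos(angle between query and point),
    # ranging from 0 (same direction) to 2 (opposite direction).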
    # The first point is almost aligned with the query (very small angle),
    # so its cosine distance should be almost zero:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms a 45 degree angle with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but of opposite sign,
    # so it has a cosine 'distance' very close to the maximum possible
    # value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
    # lies exactly on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # _fit_X should grow by n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # the original_indices_ entries should grow by the same amount
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # each tree should grow by the same amount
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
    # The variance and mean of each hash function (projection vector)
    # should differ from those of the flattened array of all hash functions.
    # If the hash functions were not built randomly (i.e. all seeded with the
    # same value), the variances and means of all functions would be equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
    # This should handle the case where the number of candidates is 0.
    # The user should be warned when the number of candidates is less than
    # the requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
aabadie/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number
of components automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
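        # The eigen-decomposition of the 2x2 covariance gives the principal
        # axes of the Gaussian; the square roots of the eigenvalues are the
        # standard deviations along those axes, rescaled below to set the
        # ellipse's width and height.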
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
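# The first blob is a correlated (sheared) Gaussian obtained through C; the
# second is an isotropic Gaussian with standard deviation 0.7 centered at
# (-6, 3).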
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/extension/test_numpy.py | 2 | 12536 | import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
import pandas as pd
from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
import pandas.util.testing as tm
from . import base
@pytest.fixture(params=["float", "object"])
def dtype(request):
return PandasDtype(np.dtype(request.param))
@pytest.fixture
def allow_in_pandas(monkeypatch):
"""
    A monkeypatch that tells pandas to let us in.
By default, passing a PandasArray to an index / series / frame
constructor will unbox that PandasArray to an ndarray, and treat
it as a non-EA column. We don't want people using EAs without
reason.
The mechanism for this is a check against ABCPandasArray
in each constructor.
But, for testing, we need to allow them in pandas. So we patch
the _typ of PandasArray, so that we evade the ABCPandasArray
check.
"""
with monkeypatch.context() as m:
m.setattr(PandasArray, "_typ", "extension")
yield
@pytest.fixture
def data(allow_in_pandas, dtype):
if dtype.numpy_dtype == "object":
return pd.Series([(i,) for i in range(100)]).array
return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
# For NumPy <1.16, np.array([np.nan, (1,)]) raises
# ValueError: setting an array element with a sequence.
if dtype.numpy_dtype == "object":
if _np_version_under1p16:
raise pytest.skip("Skipping for NumPy <1.16")
return PandasArray(np.array([np.nan, (1,)]))
return PandasArray(np.array([np.nan, 1.0]))
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
def cmp(a, b):
return np.isnan(a) and np.isnan(b)
return cmp
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
if dtype.numpy_dtype == "object":
# Use an empty tuple for first element, then remove,
# to disable np.array's shape inference.
return PandasArray(np.array([(), (2,), (3,), (1,)])[1:])
return PandasArray(np.array([1, 2, 0]))
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
if dtype.numpy_dtype == "object":
return PandasArray(np.array([(1,), np.nan, (0,)]))
return PandasArray(np.array([1, np.nan, 0]))
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
"""Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
if dtype.numpy_dtype == "object":
a, b, c = (1,), (2,), (3,)
else:
a, b, c = np.arange(3)
return PandasArray(np.array([b, b, np.nan, np.nan, a, a, b, c]))
@pytest.fixture
def skip_numpy_object(dtype):
"""
Tests for PandasArray with nested data. Users typically won't create
these objects via `pd.array`, but they can show up through `.array`
on a Series with nested data. Many of the base tests fail, as they aren't
appropriate for nested data.
This fixture allows these tests to be skipped when used as a usefixtures
marker to either an individual test or a test class.
"""
if dtype == "object":
raise pytest.skip("Skipping for object dtype.")
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
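# Applying `skip_nested` to a test or test class skips it for the object-dtype
# parametrization, whose array elements are tuples (nested data).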
class BaseNumPyTests:
pass
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
@skip_nested
def test_astype_str(self, data):
# ValueError: setting an array element with a sequence
super().test_astype_str(data)
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
@pytest.mark.skip(reason="We don't register our dtype")
# We don't want to register. This test should probably be split in two.
def test_from_dtype(self, data):
pass
@skip_nested
def test_array_from_scalars(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_array_from_scalars(data)
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
@pytest.mark.skip(reason="Incorrect expected.")
# we unsurprisingly clash with a NumPy name.
def test_check_dtype(self, data):
pass
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
def test_getitem_scalar(self, data):
# AssertionError
super().test_getitem_scalar(data)
@skip_nested
def test_take_series(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_take_series(data)
@pytest.mark.xfail(reason="astype doesn't recognize data.dtype")
def test_loc_iloc_frame_single_dtype(self, data):
super().test_loc_iloc_frame_single_dtype(data)
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
@skip_nested
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
# ValueError: Names should be list-like for a MultiIndex
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
@skip_nested
def test_array_interface(self, data):
# NumPy array shape inference
super().test_array_interface(data)
class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
@pytest.mark.skip(reason="TODO: remove?")
def test_value_counts(self, all_data, dropna):
pass
@pytest.mark.skip(reason="Incorrect expected")
# We have a bool dtype, so the result is an ExtensionArray
# but expected is not
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
@skip_nested
def test_combine_add(self, data_repeated):
# Not numeric
super().test_combine_add(data_repeated)
@skip_nested
def test_shift_fill_value(self, data):
# np.array shape inference. Shift implementation fails.
super().test_shift_fill_value(data)
@skip_nested
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
# Fails creating expected
super().test_unique(data, box, method)
@skip_nested
def test_fillna_copy_frame(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_frame(data_missing)
@skip_nested
def test_fillna_copy_series(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_series(data_missing)
@skip_nested
def test_hash_pandas_object_works(self, data, as_frame):
# ndarray of tuples not hashable
super().test_hash_pandas_object_works(data, as_frame)
@skip_nested
def test_searchsorted(self, data_for_sorting, as_series):
# Test setup fails.
super().test_searchsorted(data_for_sorting, as_series)
@skip_nested
def test_where_series(self, data, na_value, as_frame):
# Test setup fails.
super().test_where_series(data, na_value, as_frame)
@skip_nested
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
# Fails creating expected
super().test_repeat(data, repeats, as_series, use_numpy)
@skip_nested
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
series_scalar_exc = None
frame_scalar_exc = None
series_array_exc = None
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data, exc=None)
@pytest.mark.skip("We implement ops")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
@skip_nested
class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
# avoid coercing int -> float. Just cast to the actual numpy type.
expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@skip_nested
class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
pass
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
def test_fillna_scalar(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_scalar(data_missing)
@skip_nested
def test_fillna_series_method(self, data_missing, fillna_method):
# Non-scalar "scalar" values.
super().test_fillna_series_method(data_missing, fillna_method)
@skip_nested
def test_fillna_series(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_series(data_missing)
@skip_nested
def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip("Incorrect parent test")
# not actually a mixed concat, since we concat int and int.
def test_concat_mixed_dtypes(self, data):
super().test_concat_mixed_dtypes(data)
@skip_nested
def test_merge(self, data, na_value):
# Fails creating expected
super().test_merge(data, na_value)
@skip_nested
def test_merge_on_extension_array(self, data):
# Fails creating expected
super().test_merge_on_extension_array(data)
@skip_nested
def test_merge_on_extension_array_duplicates(self, data):
# Fails creating expected
super().test_merge_on_extension_array_duplicates(data)
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
@skip_nested
def test_setitem_scalar_series(self, data, box_in_series):
# AssertionError
super().test_setitem_scalar_series(data, box_in_series)
@skip_nested
def test_setitem_sequence(self, data, box_in_series):
# ValueError: shape mismatch: value array of shape (2,1) could not
# be broadcast to indexing result of shape (2,)
super().test_setitem_sequence(data, box_in_series)
@skip_nested
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
# ValueError: PandasArray must be 1-dimensional.
super().test_setitem_sequence_mismatched_length_raises(data, as_array)
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_sequence_broadcasts(data, box_in_series)
@skip_nested
def test_setitem_loc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_loc_scalar_mixed(data)
@skip_nested
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@skip_nested
def test_setitem_iloc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_iloc_scalar_mixed(data)
@skip_nested
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@skip_nested
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_mask_broadcast(data, setter)
@skip_nested
def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
pass
| apache-2.0 |
ml-lab/pylearn2 | pylearn2/models/tests/test_s3c_inference.py | 4 | 14275 | from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from theano import function
import numpy as np
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
import warnings
def broadcast(mat, shape_0):
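    # If `mat` has a single row, tile that row `shape_0` times so that every
    # example gets its own copy of the values; otherwise `mat` is returned
    # unchanged (its first dimension must already equal `shape_0`).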
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
def __init__(self):
""" gets a small batch of data
sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print 'iteration ',i
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
#ignore failures extremely close to h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print 'masked failures: ',mask.shape[0],' err ',g_abs_max
if mask.sum() > 0:
print 'failing h passing the range mask'
print failing_h[ mask.astype(bool) ]
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print 'failures after iteration ',i,': ',(increase > self.tol).sum()
mx = increase.max()
if mx > 1e-4:
print 'increase amounts of failing examples:'
print increase[increase > self.tol]
print 'failing H:'
print H[increase > self.tol,:]
print 'failing Mu1:'
print Mu1[increase > self.tol,:]
print 'failing V:'
print X[increase > self.tol,:]
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
| bsd-3-clause |
rabernat/xrft | setup.py | 1 | 1391 | import os
import versioneer
from setuptools import setup, find_packages
PACKAGES = find_packages()
DISTNAME = 'xrft'
LICENSE = 'MIT'
AUTHOR = 'xrft Developers'
AUTHOR_EMAIL = 'takaya@ldeo.columbia.edu'
URL = 'https://github.com/xgcm/xrft'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']
EXTRAS_REQUIRE = ['cftime']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Discrete Fourier Transform with xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
| mit |
zorojean/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
xyguo/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
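# X and Y share the same latent variables, so cross decomposition should find
# highly correlated pairs of components across the two datasets.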
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
einarhuseby/arctic | tests/integration/test_arctic.py | 4 | 6898 | from datetime import datetime as dt, timedelta as dtd
from mock import patch
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import pytest
import time
import numpy as np
from arctic.arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException, QuotaExceededException
from ..util import get_large_ts
def test_connect_to_Arctic_string(mongo_host):
arctic = Arctic(mongo_host=mongo_host)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_connect_to_Arctic_connection(mongodb, mongo_host):
arctic = Arctic(mongodb)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_simple(library):
sym = 'symbol'
data = get_large_ts(100)
library.write(sym, data)
orig = dt.now()
    time.sleep(1)  # sleep so the second write gets a strictly later timestamp
data2 = get_large_ts(100)
library.write(sym, data2, prune_previous_version=False)
# Get the timeseries, it should be the same
read2 = library.read(sym).data
assert_frame_equal(read2, data2)
# Ensure we can get the previous version
read = library.read(sym, as_of=orig).data
assert_frame_equal(read, data)
def test_indexes(arctic):
c = arctic._conn
arctic.initialize_library("library", VERSION_STORE, segment='month')
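    # initialize_library should have created exactly the indexes asserted
    # below on the data, snapshots, versions and version_nums collections.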
chunk = c.arctic.library.index_information()
assert chunk == {u'_id_': {u'key': [(u'_id', 1)], u'ns': u'arctic.library', u'v': 1},
u'symbol_1_parent_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1),
(u'parent', 1),
(u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_1_sha_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_hashed': {u'background': True,
u'key': [(u'symbol', u'hashed')],
u'ns': u'arctic.library',
u'v': 1}}
snapshots = c.arctic.library.snapshots.index_information()
assert snapshots == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.snapshots',
u'v': 1},
u'name_1': {u'background': True,
u'key': [(u'name', 1)],
u'ns': u'arctic.library.snapshots',
u'unique': True,
u'v': 1}}
versions = c.arctic.library.versions.index_information()
assert versions == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1__id_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'_id', -1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1_version_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'version', -1)],
u'ns': u'arctic.library.versions',
u'unique': True,
u'v': 1}}
version_nums = c.arctic.library.version_nums.index_information()
assert version_nums == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.version_nums',
u'v': 1},
u'symbol_1': {u'background': True,
u'key': [(u'symbol', 1)],
u'ns': u'arctic.library.version_nums',
u'unique': True,
u'v': 1}}
def test_delete_library(arctic, library, library_name):
mongo = arctic._conn
# create a library2 library too - ensure that this isn't deleted
arctic.initialize_library('user.library2', VERSION_STORE, segment='month')
library.write('asdf', get_large_ts(1))
assert 'TEST' in mongo.arctic_test.collection_names()
assert 'TEST.versions' in mongo.arctic_test.collection_names()
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
arctic.delete_library(library_name)
assert 'TEST' not in mongo.arctic_user.collection_names()
assert 'TEST.versions' not in mongo.arctic_user.collection_names()
with pytest.raises(LibraryNotFoundException):
arctic[library_name]
with pytest.raises(LibraryNotFoundException):
arctic['arctic_{}'.format(library_name)]
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
def test_quota(arctic, library, library_name):
thing = list(range(100))
library._arctic_lib.set_quota(10)
assert arctic.get_quota(library_name) == 10
assert library._arctic_lib.get_quota() == 10
library.write('thing', thing)
with pytest.raises(QuotaExceededException):
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
with pytest.raises(QuotaExceededException):
arctic.check_quota(library_name)
def test_check_quota(arctic, library, library_name):
with patch('arctic.arctic.logger.info') as info:
arctic.check_quota(library_name)
assert info.call_count == 1
def test_default_mongo_retry_timout():
now = time.time()
with pytest.raises(LibraryNotFoundException):
Arctic('unresolved-host', serverSelectionTimeoutMS=0)['some.lib']
assert time.time() - now < 1.
| lgpl-2.1 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/stats/tests/test_math.py | 9 | 1836 | import nose
from datetime import datetime
from numpy.random import randn
import numpy as np
from pandas.core.api import Series, DataFrame, date_range
import pandas.util.testing as tm
import pandas.stats.math as pmath
from pandas import ols
N, K = 100, 10
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm # noqa
except ImportError:
_have_statsmodels = False
class TestMath(tm.TestCase):
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def setUp(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = date_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
def test_rank_1d(self):
self.assertEqual(1, pmath.rank(self.series))
self.assertEqual(0, pmath.rank(Series(0, self.series.index)))
def test_solve_rect(self):
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = ols(y=b, x=self.frame, intercept=False).beta
self.assertTrue(np.allclose(result, expected))
def test_inv_illformed(self):
singular = DataFrame(np.array([[1, 1], [2, 2]]))
rs = pmath.inv(singular)
expected = np.array([[0.1, 0.2], [0.1, 0.2]])
self.assertTrue(np.allclose(rs, expected))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
jseabold/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
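    # On this (nearly) flat grid the barycenter weights should reconstruct
    # each point from its neighbors to within a small error.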
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
comprna/SUPPA | scripts/generate_boxplot_event.py | 1 | 5584 | # Generates a boxplot of the PSI values of a given event across two or more
# groups of samples (conditions), taken from a SUPPA PSI table.
"""
@authors: Juan L. Trincado
@email: juanluis.trincado@upf.edu
generate_boxplot_event.py: Generates a boxplot with the PSI values, given which samples are in which conditions
"""
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import re
from argparse import ArgumentParser, RawTextHelpFormatter
description = \
    "Description:\n\n" + \
    "This script accepts a PSI table (events as rows, samples as columns)\n" + \
    "and an event ID, and generates a boxplot of the PSI values of that event\n" + \
    "across the given groups of samples (conditions)."
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter,
add_help=True)
parser.add_argument("-i", "--input", required=True,
help="Input file")
parser.add_argument("-e", "--event", required=True, type=str,
help="Event to plot")
parser.add_argument('-g', '--groups',
action="store",
required=True,
type=str,
nargs="*",
help="Ranges of column numbers specifying the replicates per condition. "
"Column numbers have to be continuous, with no overlapping or missing columns between them. "
"Ex: 1-3,4-6")
parser.add_argument('-c', '--conds',
action="store",
required=False,
default="0",
type=str,
nargs="*",
help="Name of each one of the conditions. Ex: Mutated,Non_mutated")
parser.add_argument("-o", "--output", required=True,
help="Output path")
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def main():
args = parser.parse_args()
input_file = args.input
event = args.event
groups = re.findall(r"[\w]+", args.groups[0])
output_path = args.output
# input_file = "/home/juanluis/Desktop/Work/Master_class/events.psi"
# event = "ENSG00000149554;SE:chr11:125496728-125497502:125497725-125499127:+"
# groups = ['1','3','4','6']
# output_path = "/home/juanluis/Desktop/Work/Master_class/"
try:
logger.info("Reading input file...")
dict_PSI = {}
cond = 1
success = False
file = open(input_file)
for line in file:
tokens = line.rstrip().split("\t")
if (tokens[0]==event):
success = True
for i,x in enumerate(groups):
if(i%2==1):
continue
PSI = []
samples = range(int(groups[i]),int(groups[i+1])+1)
#Get the PSI of this group of samples
for j in samples:
PSI.append(tokens[j])
dict_PSI[cond] = PSI
cond = cond + 1
break
if(success):
#Create the boxplot
data_to_plot = []
for key in dict_PSI.keys():
data_to_plot.append(list(map(float,dict_PSI[key])))
# Create a figure instance
fig = plt.figure(figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot, patch_artist=True, sym='')
# change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='.', color='#000000', alpha=0.7)
# Assign different colors
colors = ['lightblue', 'pink']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
for j in range(len(data_to_plot)):
y = data_to_plot[j]
x = np.random.normal(1 + j, 0.02, size=len(y))
plt.plot(x, y, 'ko', alpha=0.5)
# Custom x-axis labels if the user has input conditions
if (args.conds != "0"):
conditions = re.findall(r"[\w]+", args.conds[0])
ax.set_xticklabels(conditions)
# Leave just ticks in the bottom
ax.get_xaxis().tick_bottom()
ax.set_ylabel('PSI')
# Set the title
title = "Event: " + event
ax.set_title(title, fontsize=10)
# Add a horizontal grid to the plot,
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Set the limits for the y axes
ax.set_ylim([-0.05, 1.05])
# Save the figure
output_path = output_path + "/" + event + ".png"
logger.info("Created " + output_path)
fig.savefig(output_path, bbox_inches='tight')
else:
logger.info("Event not found.")
logger.info("Done.")
exit(0)
except Exception as error:
logger.error(repr(error))
logger.error("Aborting execution")
sys.exit(1)
if __name__ == '__main__':
    main()
| mit |
macioosch/dynamo-hard-spheres-sim | convergence-plot.py | 1 | 6346 | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from glob import glob
from itertools import izip
from matplotlib import pyplot as plt
import numpy as np
input_files = glob("csv/convergence-256000-0.*.csv")
#input_files = glob("csv/convergence-500000-0.*.csv")
#input_files = glob("csv/convergence-1000188-0.*.csv")
#plotted_parameter = "msds_diffusion"
plotted_parameter = "pressures_collision"
#plotted_parameter = "pressures_virial"
#plotted_parameter = "msds_val"
#plotted_parameter = "times"
legend_names = []
tight_layout = False
show_legend = False
for file_number, file_name in enumerate(sorted(input_files)):
data = np.genfromtxt(file_name, delimiter='\t', names=[
"packings","densities","collisions","n_atoms","pressures_virial",
"pressures_collision","msds_val","msds_diffusion","times",
"std_pressures_virial","std_pressures_collision","std_msds_val",
"std_msds_diffusion","std_times"])
n_atoms = data["n_atoms"][0]
density = data["densities"][0]
equilibrated_collisions = data["collisions"] - 2*data["collisions"][0] \
+ data["collisions"][1]
"""
### 5 graphs: D(CPS) ###
tight_layout = True
skip_points = 0
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((equilibrated_collisions / n_atoms)[skip_points:],
data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:],
data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:], alpha=0.3)
plt.plot((equilibrated_collisions / n_atoms)[skip_points:],
data[plotted_parameter][skip_points:], lw=2)
if plotted_parameter == "msds_diffusion":
plt.ylim(0.990*data[plotted_parameter][-1],
1.005*data[plotted_parameter][-1])
plt.xlim([0, 1e5])
plt.legend(["Density {}".format(data["densities"][0])], loc="lower right")
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.4f'))
plt.xlabel("Collisions per sphere")
plt.ylabel("D")
"""
### 5 graphs: relative D(CPS) ###
tight_layout = True
skip_points = 0
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((equilibrated_collisions / n_atoms)[skip_points:],
-1 + (data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:])/data[plotted_parameter][-1],
-1 + (data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:])/data[plotted_parameter][-1], alpha=0.3)
plt.plot((equilibrated_collisions / n_atoms)[skip_points:],
-1 + data[plotted_parameter][skip_points:]/data[plotted_parameter][-1], lw=2)
plt.ylim(data["std_" + plotted_parameter][-1]*20*np.array([-1, 1])/data[plotted_parameter][-1])
#plt.xscale("log")
plt.xlim([0, 1e5])
plt.legend(["$\\rho\\sigma^3=\\ {}$".format(data["densities"][0])], loc="lower right")
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.2e'))
plt.xlabel("$C/N$")
plt.ylabel("$[Z_{MD}(C) / Z_{MD}(C=10^5 N)] - 1$")
"""
### 1 graph: D(t) ###
show_legend = True
skip_points = 0
plt.title("D(t) for 5 densities")
plt.loglog(data["times"][skip_points:],
data[plotted_parameter][skip_points:])
legend_names.append(data["densities"][0])
plt.xlabel("Time")
plt.ylabel("D")
"""
"""
### 1 graph: D(t) / Dinf ###
show_legend = True
skip_points = 0
#plt.fill_between(data["times"][skip_points:],
# (data[plotted_parameter] - data["std_" + plotted_parameter])
# / data[plotted_parameter][-1] - 1,
# (data[plotted_parameter] + data["std_" + plotted_parameter])
# / data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot(data["times"][skip_points:],
data[plotted_parameter] / data[plotted_parameter][-1] - 1, lw=1)
legend_names.append(data["densities"][0])
#plt.xscale("log")
plt.xlabel("Time")
plt.ylabel("D / D(t --> inf)")
"""
"""
### 5 graphs: D(1/CPS) ###
tight_layout = True
skip_points = 40
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((n_atoms / equilibrated_collisions)[skip_points:],
data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:],
data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:], alpha=0.3)
plt.plot((n_atoms / equilibrated_collisions)[skip_points:],
data[plotted_parameter][skip_points:], lw=2)
plt.title("Density {}:".format(data["densities"][0]))
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.7f'))
plt.xlim(xmin=0)
plt.xlabel("1 / Collisions per sphere")
plt.ylabel("D")
"""
"""
### 1 graph: D(CPS) / Dinf ###
show_legend = True
plt.fill_between(equilibrated_collisions / n_atoms,
(data[plotted_parameter] - data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1,
(data[plotted_parameter] + data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot(equilibrated_collisions / n_atoms,
data[plotted_parameter] / data[plotted_parameter][-1] - 1, lw=2)
legend_names.append(data["densities"][0])
plt.xlabel("Collisions per sphere")
plt.ylabel("D / D(t --> inf)")
"""
"""
### 1 graph: D(1/CPS) / Dinf ###
show_legend = True
plt.fill_between(n_atoms / equilibrated_collisions,
(data[plotted_parameter] - data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1,
(data[plotted_parameter] + data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot( n_atoms / equilibrated_collisions,
data[plotted_parameter] / data[plotted_parameter][-1] - 1)
legend_names.append(data["densities"][0])
plt.xlabel(" 1 / Collisions per sphere")
plt.ylabel(plotted_parameter)
"""
#if tight_layout:
# plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
if show_legend:
plt.legend(legend_names, title="Density:", loc="lower right")
plt.show()
| gpl-3.0 |
MartinDelzant/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
spacetelescope/stsci.tools | doc/source/conf.py | 1 | 7012 | # -*- coding: utf-8 -*-
#
# stsci.tools documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 7 13:09:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from stsci.tools import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stsci.tools'
copyright = u'2020, STScI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_static_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = ['py-modindex']
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'stsci.toolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'stsci.tools.tex', u'stsci.tools Documentation',
# u'SSB', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/',
(None, 'http://data.astropy.org/intersphinx/matplotlib.inv')),
'astropy': ('https://docs.astropy.org/en/stable/', None)
}
| bsd-3-clause |
rsheftel/pandas_market_calendars | tests/test_bse_calendar.py | 1 | 1192 | import datetime
import pandas as pd
import pytz
from pandas_market_calendars.exchange_calendar_bse import BSEExchangeCalendar, BSEClosedDay
def test_time_zone():
assert BSEExchangeCalendar().tz == pytz.timezone('Asia/Calcutta')
assert BSEExchangeCalendar().name == 'BSE'
def test_holidays():
bse_calendar = BSEExchangeCalendar()
trading_days = bse_calendar.valid_days(pd.Timestamp('2004-01-01'), pd.Timestamp('2018-12-31'))
for session_label in BSEClosedDay:
assert session_label not in trading_days
def test_open_close_time():
bse_calendar = BSEExchangeCalendar()
india_time_zone = pytz.timezone('Asia/Calcutta')
bse_schedule = bse_calendar.schedule(
start_date=india_time_zone.localize(datetime.datetime(2015, 1, 14)),
end_date=india_time_zone.localize(datetime.datetime(2015, 1, 16))
)
assert BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 14, 11, 0))
)
assert not BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 9, 12, 0))
)
| mit |
timqian/sms-tools | lectures/5-Sinusoidal-model/plots-code/sineModelAnal-flute.py | 24 | 1179 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/flute-A4.wav'))
w = np.blackman(601)
N = 1024
H = 150
t = -80
minSineDur = .1
maxnSines = 150
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur)
plt.figure(1, figsize=(9.5, 5))
maxplotfreq = 5000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (flute-A4.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-flute.png')
plt.show()
| agpl-3.0 |
jm-begon/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
nolanliou/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 24 | 6638 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
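# A minimal usage sketch (assumed caller code, using the graph-mode Dataset API
# that this module targets):
#   train, test = dataset()
#   features, label = (train.shuffle(1000).batch(32)
#                      .make_one_shot_iterator().get_next())
#   # `features` is a dict of column-name -> tensor, `label` is the price tensor.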
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
    `(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
cajal/pipeline | python/pipeline/utils/galvo_corrections.py | 5 | 13668 | """ Utilities for motion and raster correction of resonant scans. """
import numpy as np
from scipy import interpolate as interp
from scipy import signal
from scipy import ndimage
from ..exceptions import PipelineException
from ..utils.signal import mirrconv
def compute_raster_phase(image, temporal_fill_fraction):
""" Compute raster correction for bidirectional resonant scanners.
It shifts the even and odd rows of the image in the x axis to find the scan angle
that aligns them better. Positive raster phase will shift even rows to the right and
odd rows to the left (assuming first row is row 0).
:param np.array image: The image to be corrected.
:param float temporal_fill_fraction: Fraction of time during which the scan is
recording a line against the total time per line.
:return: An angle (in radians). Estimate of the mismatch angle between the expected
initial angle and the one recorded.
:rtype: float
"""
# Make sure image has even number of rows (so number of even and odd rows is the same)
image = image[:-1] if image.shape[0] % 2 == 1 else image
# Get some params
image_height, image_width = image.shape
skip_rows = round(image_height * 0.05) # rows near the top or bottom have artifacts
skip_cols = round(image_width * 0.10) # so do columns
# Create images with even and odd rows
even_rows = image[::2][skip_rows: -skip_rows]
odd_rows = image[1::2][skip_rows: -skip_rows]
# Scan angle at which each pixel was recorded.
max_angle = (np.pi / 2) * temporal_fill_fraction
scan_angles = np.linspace(-max_angle, max_angle, image_width + 2)[1:-1]
#sin_index = np.sin(scan_angles)
# Greedy search for the best raster phase: starts at coarse estimates and refines them
even_interp = interp.interp1d(scan_angles, even_rows, fill_value='extrapolate')
odd_interp = interp.interp1d(scan_angles, odd_rows, fill_value='extrapolate')
angle_shift = 0
for scale in [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
angle_shifts = angle_shift + scale * np.linspace(-9, 9, 19)
match_values = []
for new_angle_shift in angle_shifts:
shifted_evens = even_interp(scan_angles + new_angle_shift)
shifted_odds = odd_interp(scan_angles - new_angle_shift)
match_values.append(np.sum(shifted_evens[:, skip_cols: -skip_cols] *
shifted_odds[:, skip_cols: -skip_cols]))
angle_shift = angle_shifts[np.argmax(match_values)]
return angle_shift
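# A minimal raster-correction sketch (assumed caller code; `scan` is a numpy
# array of shape (image_height, image_width, num_frames) and
# `temporal_fill_fraction` comes from the scan metadata):
#   raster_phase = compute_raster_phase(scan.mean(axis=-1), temporal_fill_fraction)
#   scan = correct_raster(scan, raster_phase, temporal_fill_fraction)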
def compute_motion_shifts(scan, template, in_place=True, num_threads=8):
""" Compute shifts in y and x for rigid subpixel motion correction.
Returns the number of pixels that each image in the scan was to the right (x_shift)
or below (y_shift) the template. Negative shifts mean the image was to the left or
above the template.
:param np.array scan: 2 or 3-dimensional scan (image_height, image_width[, num_frames]).
:param np.array template: 2-d template image. Each frame in scan is aligned to this.
:param bool in_place: Whether the scan can be overwritten.
:param int num_threads: Number of threads used for the ffts.
:returns: (y_shifts, x_shifts) Two arrays (num_frames) with the y, x motion shifts.
..note:: Based in imreg_dft.translation().
"""
import pyfftw
from imreg_dft import utils
# Add third dimension if scan is a single image
if scan.ndim == 2:
scan = np.expand_dims(scan, -1)
# Get some params
image_height, image_width, num_frames = scan.shape
taper = np.outer(signal.tukey(image_height, 0.2), signal.tukey(image_width, 0.2))
# Prepare fftw
frame = pyfftw.empty_aligned((image_height, image_width), dtype='complex64')
fft = pyfftw.builders.fft2(frame, threads=num_threads, overwrite_input=in_place,
avoid_copy=True)
ifft = pyfftw.builders.ifft2(frame, threads=num_threads, overwrite_input=in_place,
avoid_copy=True)
# Get fourier transform of template
template_freq = fft(template * taper).conj() # we only need the conjugate
abs_template_freq = abs(template_freq)
eps = abs_template_freq.max() * 1e-15
# Compute subpixel shifts per image
y_shifts = np.empty(num_frames)
x_shifts = np.empty(num_frames)
for i in range(num_frames):
# Compute correlation via cross power spectrum
image_freq = fft(scan[:, :, i] * taper)
cross_power = (image_freq * template_freq) / (abs(image_freq) * abs_template_freq + eps)
shifted_cross_power = np.fft.fftshift(abs(ifft(cross_power)))
# Get best shift
shifts = np.unravel_index(np.argmax(shifted_cross_power), shifted_cross_power.shape)
shifts = utils._interpolate(shifted_cross_power, shifts, rad=3)
# Map back to deviations from center
y_shifts[i] = shifts[0] - image_height // 2
x_shifts[i] = shifts[1] - image_width // 2
return y_shifts, x_shifts
def fix_outliers(y_shifts, x_shifts, max_y_shift=20, max_x_shift=20, method='median'):
""" Look for spikes in motion shifts and set them to a sensible value.
Reject any shift whose y or x shift is higher than max_y_shift/max_x_shift pixels
from the median/linear estimate/moving average. Outliers filled by interpolating
valid points; in the edges filled with the median/linear estimate/moving average.
:param np.array y_shifts/x_shifts: Shifts in y, x.
:param float max_y_shift/max_x_shifts: Number of pixels used as threshold to classify
a point as an outlier in y, x.
    :param string method: One of 'median', 'linear' or 'trend'.
'median': Detect outliers as deviations from the median of the shifts.
'linear': Detect outliers as deviations from a line estimated from the shifts.
'trend': Detect outliers as deviations from the shift trend computed as a moving
average over the entire scan.
:returns: (y_shifts, x_shifts) Two arrays (num_frames) with the fixed motion shifts.
:returns: (outliers) A boolean array (num_frames) with True for outlier frames.
"""
# Basic checks
num_frames = len(y_shifts)
if num_frames < 5:
return y_shifts, x_shifts, np.full(num_frames, False)
# Copy shifts to avoid changing originals
y_shifts, x_shifts = y_shifts.copy(), x_shifts.copy()
# Detrend shifts
if method == 'median':
y_trend = np.median(y_shifts)
x_trend = np.median(x_shifts)
elif method == 'linear':
x_trend = _fit_robust_line(x_shifts)
y_trend = _fit_robust_line(y_shifts)
else: # trend
window_size = min(101, num_frames)
window_size -= 1 if window_size % 2 == 0 else 0
y_trend = mirrconv(y_shifts, np.ones(window_size) / window_size)
x_trend = mirrconv(x_shifts, np.ones(window_size) / window_size)
# Subtract trend from shifts
y_shifts -= y_trend
x_shifts -= x_trend
# Get outliers
outliers = np.logical_or(abs(y_shifts) > max_y_shift, abs(x_shifts) > max_x_shift)
# Interpolate outliers
num_outliers = np.sum(outliers)
if num_outliers < num_frames - 1: # at least two good points needed for interpolation
#indices = np.arange(len(x_shifts))
#y_shifts = np.interp(indices, indices[~outliers], y_shifts[~outliers], left=0, right=0)
#x_shifts = np.interp(indices, indices[~outliers], x_shifts[~outliers], left=0, right=0)
y_shifts[outliers] = 0
x_shifts[outliers] = 0
else:
print('Warning: {} out of {} frames were outliers.'.format(num_outliers, num_frames))
y_shifts = 0
x_shifts = 0
# Add trend back to shifts
y_shifts += y_trend
x_shifts += x_trend
return y_shifts, x_shifts, outliers
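# A minimal end-to-end motion-correction sketch (assumed caller code; the
# `template` would typically be an average of raster-corrected frames):
#   y_shifts, x_shifts = compute_motion_shifts(scan, template)
#   y_shifts, x_shifts, outliers = fix_outliers(y_shifts, x_shifts, method='median')
#   scan = correct_motion(scan, x_shifts, y_shifts)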
def _fit_robust_line(shifts):
""" Use a robust linear regression algorithm to fit a line to the data."""
from sklearn.linear_model import TheilSenRegressor
X = np.arange(len(shifts)).reshape(-1, 1)
y = shifts
model = TheilSenRegressor() # robust regression
model.fit(X, y)
line = model.predict(X)
return line
def correct_raster(scan, raster_phase, temporal_fill_fraction, in_place=True):
""" Raster correction for resonant scans.
Corrects multi-photon images in n-dimensional scans. Positive raster phase shifts
even lines to the left and odd lines to the right. Negative raster phase shifts even
lines to the right and odd lines to the left.
:param np.array scan: Volume with images to be corrected in the first two dimensions.
Works for 2-dimensions and up, usually (image_height, image_width, num_frames).
:param float raster_phase: Angle difference between expected and recorded scan angle.
:param float temporal_fill_fraction: Ratio between active acquisition and total
length of the scan line.
:param bool in_place: If True (default), the original array is modified in place.
:return: Raster-corrected scan.
:rtype: Same as scan if scan.dtype is subtype of np.float, else np.float32.
:raises: PipelineException
"""
# Basic checks
if not isinstance(scan, np.ndarray):
raise PipelineException('Scan needs to be a numpy array.')
if scan.ndim < 2:
raise PipelineException('Scan with less than 2 dimensions.')
# Assert scan is float
if not np.issubdtype(scan.dtype, np.floating):
print('Warning: Changing scan type from', str(scan.dtype), 'to np.float32')
scan = scan.astype(np.float32, copy=(not in_place))
elif not in_place:
scan = scan.copy() # copy it anyway preserving the original float dtype
# Get some dimensions
original_shape = scan.shape
image_height = original_shape[0]
image_width = original_shape[1]
# Scan angle at which each pixel was recorded.
max_angle = (np.pi / 2) * temporal_fill_fraction
scan_angles = np.linspace(-max_angle, max_angle, image_width + 2)[1:-1]
# We iterate over every image in the scan (first 2 dimensions). Same correction
# regardless of what channel, slice or frame they belong to.
reshaped_scan = np.reshape(scan, (image_height, image_width, -1))
num_images = reshaped_scan.shape[-1]
for i in range(num_images):
# Get current image
image = reshaped_scan[:, :, i]
# Correct even rows of the image (0, 2, ...)
interp_function = interp.interp1d(scan_angles, image[::2, :], bounds_error=False,
fill_value=0, copy=(not in_place))
reshaped_scan[::2, :, i] = interp_function(scan_angles + raster_phase)
# Correct odd rows of the image (1, 3, ...)
interp_function = interp.interp1d(scan_angles, image[1::2, :], bounds_error=False,
fill_value=0, copy=(not in_place))
reshaped_scan[1::2, :, i] = interp_function(scan_angles - raster_phase)
scan = np.reshape(reshaped_scan, original_shape)
return scan
def correct_motion(scan, x_shifts, y_shifts, in_place=True):
""" Motion correction for multi-photon scans.
Shifts each image in the scan x_shift pixels to the left and y_shift pixels up.
:param np.array scan: Volume with images to be corrected in the first two dimensions.
Works for 2-dimensions and up, usually (image_height, image_width, num_frames).
:param list/np.array x_shifts: 1-d array with x motion shifts for each image.
:param list/np.array y_shifts: 1-d array with x motion shifts for each image.
:param bool in_place: If True (default), the original array is modified in place.
:return: Motion corrected scan
:rtype: Same as scan if scan.dtype is subtype of np.float, else np.float32.
:raises: PipelineException
"""
# Basic checks
if not isinstance(scan, np.ndarray):
raise PipelineException('Scan needs to be a numpy array.')
if scan.ndim < 2:
raise PipelineException('Scan with less than 2 dimensions.')
if np.ndim(y_shifts) != 1 or np.ndim(x_shifts) != 1:
raise PipelineException('Dimension of one or both motion arrays differs from 1.')
if len(x_shifts) != len(y_shifts):
raise PipelineException('Length of motion arrays differ.')
# Assert scan is float (integer precision is not good enough)
if not np.issubdtype(scan.dtype, np.floating):
print('Warning: Changing scan type from', str(scan.dtype), 'to np.float32')
scan = scan.astype(np.float32, copy=(not in_place))
elif not in_place:
scan = scan.copy() # copy it anyway preserving the original dtype
# Get some dimensions
original_shape = scan.shape
image_height = original_shape[0]
image_width = original_shape[1]
# Reshape input (to deal with more than 2-D volumes)
reshaped_scan = np.reshape(scan, (image_height, image_width, -1))
if reshaped_scan.shape[-1] != len(x_shifts):
raise PipelineException('Scan and motion arrays have different dimensions')
# Ignore NaN values (present in some older data)
y_clean, x_clean = y_shifts.copy(), x_shifts.copy()
y_clean[np.logical_or(np.isnan(y_shifts), np.isnan(x_shifts))] = 0
x_clean[np.logical_or(np.isnan(y_shifts), np.isnan(x_shifts))] = 0
# Shift each frame
for i, (y_shift, x_shift) in enumerate(zip(y_clean, x_clean)):
image = reshaped_scan[:, :, i].copy()
ndimage.interpolation.shift(image, (-y_shift, -x_shift), order=1,
output=reshaped_scan[:, :, i])
scan = np.reshape(reshaped_scan, original_shape)
    return scan
| lgpl-3.0 |
wdurhamh/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
keras-team/keras-io | examples/nlp/semantic_similarity_with_bert.py | 1 | 11604 | """
Title: Semantic Similarity with BERT
Author: [Mohamad Merchant](https://twitter.com/mohmadmerchant1)
Date created: 2020/08/15
Last modified: 2020/08/29
Description: Natural Language Inference by fine-tuning BERT model on SNLI Corpus.
"""
"""
## Introduction
Semantic Similarity is the task of determining how similar
two sentences are, in terms of what they mean.
This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus
to predict sentence semantic similarity with Transformers.
We will fine-tune a BERT model that takes two sentences as inputs
and that outputs a similarity score for these two sentences.
### References
* [BERT](https://arxiv.org/pdf/1810.04805.pdf)
* [SNLI](https://nlp.stanford.edu/projects/snli/)
"""
"""
## Setup
Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
"""
## Configuration
"""
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]
"""
## Load the Data
"""
"""shell
curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
tar -xvzf data.tar.gz
"""
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {valid_df.shape[0]}")
"""
Dataset Overview:
- sentence1: The premise caption that was supplied to the author of the pair.
- sentence2: The hypothesis caption that was written by the author of the pair.
- similarity: This is the label chosen by the majority of annotators.
Where no majority exists, the label "-" is used (we will skip such samples here).
Here are the "similarity" label values in our dataset:
- Contradiction: The sentences share no similarity.
- Entailment: The sentences have similar meaning.
- Neutral: The sentences are neutral.
"""
"""
Let's look at one sample from the dataset:
"""
print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")
"""
## Preprocessing
"""
# We have some NaN entries in our train data, we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)
"""
Distribution of our training targets.
"""
print("Train Target Distribution")
print(train_df.similarity.value_counts())
"""
Distribution of our validation targets.
"""
print("Validation Target Distribution")
print(valid_df.similarity.value_counts())
"""
The value "-" appears as part of our training and validation targets.
We will skip these samples.
"""
train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
"""
One-hot encode training, validation, and test labels.
"""
train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)
"""
## Create a custom data generator
"""
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
    Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
if `include_targets=False`)
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
# We will use base-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
# Retrieves the batch of index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
# With BERT tokenizer's batch_encode_plus batch of both the sentences are
# encoded together and separated by [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)
"""
## Build the model
"""
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
# Attention masks indicates to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
sequence_output, pooled_output = bert_model(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
# Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()
"""
Create train and validation data generators
"""
train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
"""
## Train the Model
Training is done only for the top layers to perform "feature extraction",
which will allow the model to use the representations of the pretrained model.
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Fine-tuning
This step must only be performed after the feature extraction model has
been trained to convergence on the new data.
This is an optional last step where `bert_model` is unfrozen and retrained
with a very low learning rate. This can deliver meaningful improvement by
incrementally adapting the pretrained features to the new data.
"""
# Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
"""
## Train the entire model end-to-end
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Evaluate model on the test set
"""
test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
"""
## Inference on custom sentences
"""
def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data)[0]
idx = np.argmax(proba)
    proba = f"{proba[idx] * 100: .2f}%"
pred = labels[idx]
return pred, proba
"""
Check results on some example sentence pairs.
"""
sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs.
"""
sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs
"""
sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)
| apache-2.0 |
soleneulmer/atmos | indicators_molec.py | 1 | 4324 | # ===================================
# CALCULATES Ioff and Ires
# Indicators described in Molecfit II
#
# Solene 20.09.2016
# ===================================
#
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
# from PyAstronomy import pyasl
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import stats
# from sklearn.metrics import mean_squared_error
# from math import sqrt
# from numpy import linalg as LA
# MOLECFIT
#
file_molecfit = '/home/solene/atmos/For_Solene/1203nm/output/molecfit_crires_solene_tac.fits'
hdu_molecfit = fits.open(file_molecfit)
data_molecfit = hdu_molecfit[1].data
cols_molecfit = hdu_molecfit[1].columns
# cols_molecfit.info()
rawwl_molecfit = data_molecfit.field('mlambda')
wl_molecfit = rawwl_molecfit*10e2  # factor 10e2 == 1000, brings wavelengths onto the nm scale used in the plots
trans_molecfit = data_molecfit.field('mtrans')
cflux_molecfit = data_molecfit.field('cflux')
# TELFIT
#
file_telfit = '/home/solene/atmos/trans_telfit.txt'
wl_telfit, trans_telfit, wl_datatelfit, flux_datatelfit = np.loadtxt(
file_telfit, unpack=True)
# Interpolation
f_molecfit = interp1d(wl_molecfit, cflux_molecfit, kind='cubic')
ftrans_molecfit = interp1d(wl_molecfit, trans_molecfit, kind='cubic')
# f_tapas = interp1d(wlcorr_tapas, trans_tapas)
# **1** BINNED DATA
# 3 delta-lambda = 0.036
# Mean and std deviation of bins on the telluric CORRECTED spectrum
fluxmean_bin_means, bin_edges, binnumber = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
fluxstd_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic=np.std,
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width/2
# **2** Bins where average TRANSMISSION is > 0.99
flux_trans_mean_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, ftrans_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
# cont_bin_means = flux_trans_mean_bin_means[flux_trans_mean_bin_means > 0.99]
ind_cont = np.where(flux_trans_mean_bin_means > 0.99)
ind_out = np.where((flux_trans_mean_bin_means < 0.95) &
(flux_trans_mean_bin_means > 0.1))
# plt.plot(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], 'kx')
# **3** Interpolation of the continuum cubic
# f_cont = interp1d(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], kind='cubic')
# Extrapolation with constant value spline
f_cont = InterpolatedUnivariateSpline(
bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], ext=3)
# bbox=[bin_centers[ind_cont][0], bin_centers[ind_cont][-1]],
# **5** Subtract the continuum from the binned mean flux
# and divide the offset and the std by the interpolated continuum value
sys_offset = (fluxmean_bin_means - f_cont(bin_centers)) / f_cont(bin_centers)
flux_std = fluxstd_bin_means / f_cont(bin_centers)
# **6** Independent of wavelength: divide by the average absorption
absorp_molecfit = 1 - flux_trans_mean_bin_means
sys_offset_final = sys_offset / absorp_molecfit
flux_std_final = flux_std / absorp_molecfit
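# Recap of the indicators computed above (per wavelength bin; this summarises the code,
# the exact definitions are those of the Molecfit II paper cited in the header):
#   Ioff ~ sys_offset_final = ((<F_corr> - F_cont) / F_cont) / (1 - <T>)
#   Ires ~ flux_std_final   = ( std(F_corr)        / F_cont) / (1 - <T>)
# with <F_corr>, std(F_corr) the binned mean/std of the telluric-corrected flux,
# F_cont the interpolated continuum and <T> the binned mean model transmission.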
plt.figure(1)
plt.plot(wl_datatelfit, flux_datatelfit, 'b.-', label='Raw data')
# plt.hlines(flux_bin_means, bin_edges[:-1],
# bin_edges[1:], colors='g', lw=5, label='binned statistic of data')
plt.plot(bin_centers, fluxmean_bin_means, 'rx-', label='Mean binned data')
plt.plot(bin_centers, fluxstd_bin_means, 'kx-', label='Standard deviation binned data')
plt.legend()
plt.figure(2)
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wl_datatelfit, f_molecfit(wl_datatelfit),
'b-', label='Corrected data - Molecfit')
plt.plot(wl_datatelfit, f_cont(wl_datatelfit),
'k-', label='Interpolated Continuum')
plt.plot(sys_offset_final[ind_out], flux_std_final[ind_out], 'kx')
plt.plot(flux_trans_mean_bin_means[ind_out],
sys_offset_final[ind_out], 'kx', label='Ioff vs Transmission')
plt.plot(flux_trans_mean_bin_means[ind_out],
flux_std_final[ind_out], 'r.', label='Ires vs Transmission')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Transmission')
plt.legend(loc=3.)
plt.show()
| mit |
mxjl620/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/utils/tests/test_class_weight.py | 14 | 6559 | import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
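    # (Sanity check of the expected numbers: class counts are {-2: 3, -1: 1, 0: 2};
    #  the reciprocals (1/3, 1, 1/2) have mean ~0.611, and dividing by that mean gives
    #  ~0.545, 1.636, 0.818 -- i.e. "auto" behaves like normalised inverse frequencies.)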
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("auto", y)
expected = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, expected ** 2)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, [0, 1, 1, 2, 2, 3])
expected = np.asarray([1/3., 1/3., 1/3., 5/3., 5/3., 5/3.])
assert_array_almost_equal(sample_weight, expected)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
jm-begon/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
rigetticomputing/grove | grove/tomography/state_tomography.py | 1 | 11664 | ##############################################################################
# Copyright 2017-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging
import numpy as np
import matplotlib.pyplot as plt
from pyquil.quilbase import Pragma
from scipy.sparse import csr_matrix, coo_matrix
from pyquil.quil import Program
import grove.tomography.operator_utils
from grove.tomography.tomography import TomographyBase, TomographySettings, DEFAULT_SOLVER_KWARGS
from grove.tomography import tomography
import grove.tomography.utils as ut
import grove.tomography.operator_utils as o_ut
_log = logging.getLogger(__name__)
qt = ut.import_qutip()
cvxpy = ut.import_cvxpy()
UNIT_TRACE = 'unit_trace'
POSITIVE = 'positive'
DEFAULT_STATE_TOMO_SETTINGS = TomographySettings(
constraints={UNIT_TRACE},
solver_kwargs=DEFAULT_SOLVER_KWARGS
)
def _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops):
"""
Prepare the coefficient matrix for state tomography. This function uses sparse matrices
for much greater efficiency.
The coefficient matrix is defined as:
.. math::
C_{(jk)m} = \tr{\Pi_{s_j} \Lambda_k(P_m)} = \sum_{r}\pi_{jr}(\mathcal{R}_{k})_{rm}
where :math:`\Lambda_k(\cdot)` is the quantum map corresponding to the k-th pre-measurement
channel, i.e., :math:`\Lambda_k(\rho) = E_k \rho E_k^\dagger` where :math:`E_k` is the k-th
channel operator. This map can also be represented via its transfer matrix
:math:`\mathcal{R}_{k}`. In that case one also requires the overlap between the (generalized)
Pauli basis ops and the projection operators
:math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`.
See the grove documentation on tomography for detailed information.
:param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
:param list channel_ops: The pre-measurement channel operators as `qutip.Qobj`
:return: The coefficient matrix necessary to set up the binomial state tomography problem.
:rtype: scipy.sparse.csr_matrix
"""
channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek)) for ek in channel_ops]
# This bit could be more efficient but does not run super long and is thus preserved for
# readability.
pi_jr = csr_matrix(
[pauli_basis.project_op(n_j).toarray().ravel()
for n_j in readout_povm.ops])
# Dict used for constructing our sparse matrix, keys are tuples (row_index, col_index), values
# are the non-zero elements of the final matrix.
c_jk_m_elms = {}
# This explicitly exploits the sparsity of all operators involved
for k in range(len(channel_ops)):
pi_jr__rk_rm = (pi_jr * channel_transfer_matrices[k]).tocoo()
for (j, m, val) in ut.izip(pi_jr__rk_rm.row, pi_jr__rk_rm.col, pi_jr__rk_rm.data):
# The multi-index (j,k) is enumerated in column-major ordering (like Fortran arrays)
c_jk_m_elms[(j + k * readout_povm.pi_basis.dim, m)] = val.real
# create sparse matrix from COO-format (see scipy.sparse docs)
_keys, _values = ut.izip(*c_jk_m_elms.items())
_rows, _cols = ut.izip(*_keys)
c_jk_m = coo_matrix((list(_values), (list(_rows), list(_cols))),
shape=(readout_povm.pi_basis.dim * len(channel_ops),
pauli_basis.dim)).tocsr()
return c_jk_m
class StateTomography(TomographyBase):
"""
A StateTomography object encapsulates the result of quantum state estimation from tomographic
data. It provides convenience functions for visualization and computing state fidelities.
"""
__tomography_type__ = "STATE"
@staticmethod
def estimate_from_ssr(histograms, readout_povm, channel_ops, settings):
"""
Estimate a density matrix from single shot histograms obtained by measuring bitstrings in
the Z-eigenbasis after application of given channel operators.
:param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`.
        :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s.
:param TomographySettings settings: The solver and estimation settings.
:return: The generated StateTomography object.
:rtype: StateTomography
"""
nqc = len(channel_ops[0].dims[0])
pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc
pi_basis = readout_povm.pi_basis
if not histograms.shape[1] == pi_basis.dim: # pragma no coverage
raise ValueError("Currently tomography is only implemented for two-level systems.")
# prepare the log-likelihood function parameters, see documentation
n_kj = np.asarray(histograms)
c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops)
rho_m = cvxpy.Variable(pauli_basis.dim)
p_jk = c_jk_m * rho_m
obj = -n_kj.ravel() * cvxpy.log(p_jk)
p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order
# Default constraints:
# MLE must describe valid probability distribution
# i.e., for each k, p_jk must sum to one and be element-wise non-negative:
# 1. \sum_j p_jk == 1 for all k
# 2. p_jk >= 0 for all j, k
# where p_jk = \sum_m c_jk_m rho_m
constraints = [
p_jk >= 0,
np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1,
]
rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm)
for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0)
if POSITIVE in settings.constraints:
if tomography._SDP_SOLVER.is_functional():
constraints.append(rho_m_real_imag >> 0)
else: # pragma no coverage
_log.warning("No convex solver capable of semi-definite problems installed.\n"
"Dropping the positivity constraint on the density matrix.")
if UNIT_TRACE in settings.constraints:
# this assumes that the first element of the Pauli basis is always proportional to
# the identity
constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real)
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
_log.info("Starting convex solver")
prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs)
if prob.status != cvxpy.OPTIMAL: # pragma no coverage
_log.warning("Problem did not converge to optimal solution. "
"Solver settings: {}".format(settings.solver_kwargs))
return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings)
def __init__(self, rho_coeffs, pauli_basis, settings):
"""
Construct a StateTomography to encapsulate the result of estimating the quantum state from
a quantum tomography measurement.
        :param numpy.ndarray rho_coeffs: The estimated quantum state represented in a given (generalized)
Pauli basis.
:param OperatorBasis pauli_basis: The employed (generalized) Pauli basis.
:param TomographySettings settings: The settings used to estimate the state.
"""
self.rho_coeffs = rho_coeffs
self.pauli_basis = pauli_basis
self.rho_est = sum((r_m * p_m for r_m, p_m in ut.izip(rho_coeffs, pauli_basis.ops)))
self.settings = settings
def fidelity(self, other):
"""
Compute the quantum state fidelity of the estimated state with another state.
:param qutip.Qobj other: The other quantum state.
:return: The fidelity, a real number between 0 and 1.
:rtype: float
"""
return qt.fidelity(self.rho_est, other)
def plot_state_histogram(self, ax):
"""
Visualize the complex matrix elements of the estimated state.
:param matplotlib.Axes ax: A matplotlib Axes object to plot into.
"""
title = "Estimated state"
nqc = int(round(np.log2(self.rho_est.data.shape[0])))
labels = ut.basis_labels(nqc)
return ut.state_histogram(self.rho_est, ax, title)
def plot(self):
"""
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
"""
width = 10
# The pleasing golden ratio.
height = width / 1.618
f = plt.figure(figsize=(width, height))
ax = f.add_subplot(111, projection="3d")
self.plot_state_histogram(ax)
return f
def state_tomography_programs(state_prep, qubits=None,
rotation_generator=tomography.default_rotations):
"""
Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program
"""
if qubits is None:
qubits = state_prep.get_qubits()
for tomography_program in rotation_generator(*qubits):
state_tomography_program = Program(Pragma("PRESERVE_BLOCK"))
state_tomography_program.inst(state_prep)
state_tomography_program.inst(tomography_program)
state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK"))
yield state_tomography_program
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
"""
Method to perform both a QPU and QVM state tomography, and use the latter as
as reference to calculate the fidelity of the former.
:param Program preparation_program: Program to execute.
:param int nsamples: Number of samples to take for the program.
:param QVMConnection|QPUConnection cxn: Connection on which to run the program.
    :param list qubits: List of qubits for the program to use in the tomography analysis.
:param bool use_run: If ``True``, use append measurements on all qubits and use ``cxn.run``
instead of ``cxn.run_and_measure``.
:return: The state tomogram.
:rtype: StateTomography
"""
return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
tomography.MAX_QUBITS_STATE_TOMO,
StateTomography, state_tomography_programs,
DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
| apache-2.0 |
mupif/mupif | mupif/Field.py | 1 | 42683 | #
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from builtins import range
from builtins import object
from . import Cell
from . import FieldID
from . import ValueType
from . import BBox
from . import APIError
from . import MupifObject
from . import Mesh
from .Physics import PhysicalQuantities
from .Physics.PhysicalQuantities import PhysicalQuantity
from numpy import array, arange, random, zeros
import numpy
import copy
import Pyro4
from enum import IntEnum
import logging
log = logging.getLogger()
try:
import cPickle as pickle # faster serialization if available
except:
import pickle
# import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute
# lookup thread.lock failed
# debug flag
debug = 0
class FieldType(IntEnum):
"""
Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased.
"""
FT_vertexBased = 1
FT_cellBased = 2
@Pyro4.expose
class Field(MupifObject.MupifObject, PhysicalQuantity):
"""
Representation of field. Field is a scalar, vector, or tensorial
quantity defined on a spatial domain. The field, however is assumed
to be fixed at certain time. The field can be evaluated in any spatial point
belonging to underlying domain.
Derived classes will implement fields defined on common discretizations,
like fields defined on structured/unstructured FE meshes, FD grids, etc.
.. automethod:: __init__
.. automethod:: _evaluate
"""
def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}):
"""
Initializes the field instance.
:param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization
:param FieldID fieldID: Field type (displacement, strain, temperature ...)
:param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically.
:param Physics.PhysicalUnits units: Field value units
:param Physics.PhysicalQuantity time: Time associated with field values
:param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value)
:type values: list of tuples representing individual values
:param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased
:param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0
:param dict metaData: Optionally pass metadata for merging
"""
super(Field, self).__init__()
self.mesh = mesh
self.fieldID = fieldID
self.valueType = valueType
self.time = time
self.uri = None # pyro uri; used in distributed setting
# self.log = logging.getLogger()
self.fieldType = fieldType
self.objectID = objectID
if values is None:
if self.fieldType == FieldType.FT_vertexBased:
ncomponents = mesh.getNumberOfVertices()
else:
ncomponents = mesh.getNumberOfCells()
self.value = zeros((ncomponents, self.getRecordSize()))
else:
self.value = values
if PhysicalQuantities.isPhysicalUnit(units):
self.unit = units
else:
self.unit = PhysicalQuantities.findUnit(units)
self.setMetadata('Units', self.unit.name())
self.setMetadata('Type', 'mupif.Field.Field')
self.setMetadata('Type_ID', str(self.fieldID))
self.setMetadata('FieldType', str(fieldType))
self.setMetadata('ValueType', str(self.valueType))
self.updateMetadata(metaData)
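    # Illustrative construction (mesh, field ID and units are placeholders, not taken
    # from any particular example):
    #   f = Field(mesh, FieldID.FID_Temperature, ValueType.Scalar, 'K',
    #             PhysicalQuantity(0., 's'), fieldType=FieldType.FT_vertexBased)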
@classmethod
def loadFromLocalFile(cls, fileName):
"""
Alternative constructor which loads instance directly from a Pickle module.
:param str fileName: File name
:return: Returns Field instance
:rtype: Field
"""
return pickle.load(open(fileName, 'rb'))
def getRecordSize(self):
"""
Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance.
:return: number of scalars (1,3,9 respectively for scalar, vector, tensor)
:rtype: int
"""
if self.valueType == ValueType.Scalar:
return 1
elif self.valueType == ValueType.Vector:
return 3
elif self.valueType == ValueType.Tensor:
return 9
else:
raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType)
def getMesh(self):
"""
Obtain mesh.
:return: Returns a mesh of underlying discretization
:rtype: Mesh.Mesh
"""
return self.mesh
def getValueType(self):
"""
Returns ValueType of the field, e.g. scalar, vector, tensor.
:return: Returns value type of the receiver
:rtype: ValueType
"""
return self.valueType
def getFieldID(self):
"""
Returns FieldID, e.g. FID_Displacement, FID_Temperature.
:return: Returns field ID
:rtype: FieldID
"""
return self.fieldID
def getFieldIDName(self):
"""
Returns name of the field.
:return: Returns fieldID name
:rtype: string
"""
return self.fieldID.name
def getFieldType(self):
"""
Returns receiver field type (values specified as vertex or cell values)
:return: Returns fieldType id
:rtype: FieldType
"""
return self.fieldType
def getTime(self):
"""
Get time of the field.
:return: Time of field data
:rtype: Physics.PhysicalQuantity
"""
return self.time
def evaluate(self, positions, eps=0.0):
"""
Evaluates the receiver at given spatial position(s).
:param positions: 1D/2D/3D position vectors
:type positions: tuple, a list of tuples
:param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used)
:return: field value(s)
:rtype: Physics.PhysicalQuantity with given value or tuple of values
"""
# test if positions is a list of positions
if isinstance(positions, list):
ans = []
for pos in positions:
ans.append(self._evaluate(pos, eps))
return PhysicalQuantity(ans, self.unit)
else:
# single position passed
return PhysicalQuantity(self._evaluate(positions, eps), self.unit)
def _evaluate(self, position, eps):
"""
Evaluates the receiver at a single spatial position.
:param tuple position: 1D/2D/3D position vector
:param float eps: Optional tolerance
:return: field value
:rtype: tuple of doubles
.. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ .
"""
cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position]))
# answer=None
if len(cells):
if self.fieldType == FieldType.FT_vertexBased:
for icell in cells:
try:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()])
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
raise
return answer
except ZeroDivisionError:
print('ZeroDivisionError?')
log.debug(icell.number)
log.debug(position)
icell.debug = 1
log.debug(icell.containsPoint(position), icell.glob2loc(position))
log.error('Field::evaluate - no source cell found for position %s' % str(position))
for icell in cells:
log.debug(icell.number)
log.debug(icell.containsPoint(position))
log.debug(icell.glob2loc(position))
else: # if (self.fieldType == FieldType.FT_vertexBased):
# in case of cell based fields do compute average of cell values containing point
# this typically happens when point is on the shared edge or vertex
count = 0
for icell in cells:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
tmp = self.value[icell.number]
if count == 0:
answer = list(tmp)
else:
                            # element-wise accumulation of the cell values
                            answer = [x + y for x, y in zip(answer, tmp)]
count += 1
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
log.error(icell.getVertices())
raise
# end loop over icells
if count == 0:
log.error('Field::evaluate - no source cell found for position %s', str(position))
# for icell in cells:
# log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position))
else:
answer = [x/count for x in answer]
return answer
else:
# no source cell found
log.error('Field::evaluate - no source cell found for position ' + str(position))
raise ValueError('Field::evaluate - no source cell found for position ' + str(position))
def getVertexValue(self, vertexID):
"""
Returns the value associated with a given vertex.
:param int vertexID: Vertex identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_vertexBased:
return PhysicalQuantity(self.value[vertexID], self.unit)
else:
            raise TypeError('Attempt to access vertex value of cell based field, use evaluate instead')
def getCellValue(self, cellID):
"""
Returns the value associated with a given cell.
:param int cellID: Cell identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_cellBased:
return PhysicalQuantity(self.value[cellID], self.unit)
else:
            raise TypeError('Attempt to access cell value of vertex based field, use evaluate instead')
def _giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
        Deprecated, use getVertexValue() or getCellValue().
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: Physics.PhysicalQuantity
"""
return PhysicalQuantity(self.value[componentID], self.unit)
def giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: tuple
"""
return self.value[componentID]
def setValue(self, componentID, value):
"""
Sets the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:param tuple value: Value to be set for a given component, should have the same units as receiver
.. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked.
"""
self.value[componentID] = value
def commit(self):
"""
Commits the recorded changes (via setValue method) to a primary field.
"""
def getObjectID(self):
"""
Returns field objectID.
:return: Object's ID
:rtype: int
"""
return self.objectID
def getUnits(self):
"""
:return: Returns units of the receiver
:rtype: Physics.PhysicalUnits
"""
return self.unit
def merge(self, field):
"""
Merges the receiver with given field together. Both fields should be on different parts of the domain (can also overlap), but should refer to same underlying discretization, otherwise unpredictable results can occur.
:param Field field: given field to merge with.
"""
# first merge meshes
mesh = copy.deepcopy(self.mesh)
mesh.merge(field.mesh)
log.debug(mesh)
# merge the field values
# some type checking first
if self.fieldType != field.fieldType:
raise TypeError("Field::merge: fieldType of receiver and parameter is different")
if self.fieldType == FieldType.FT_vertexBased:
values = [0]*mesh.getNumberOfVertices()
for v in range(self.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v]
else:
values = [0]*mesh.getNumberOfCells()
for v in range(self.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v]
self.mesh = mesh
self.value = values
def field2VTKData (self, name=None, lookupTable=None):
"""
Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module.
:param str name: human-readable name of the field
:param pyvtk.LookupTable lookupTable: color lookup table
:return: Instance of pyvtk
:rtype: pyvtk.VtkData
"""
import pyvtk
if name is None:
name = self.getFieldIDName()
if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable):
log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.')
lookupTable = None
if lookupTable is None:
lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm')
# Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking
# 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping.
scalarsKw = dict(name=name, lookup_table='default')
else:
scalarsKw = dict(name=name, lookup_table=lookupTable.name)
# see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example
vectorsKw = dict(name=name) # vectors don't have a lookup_table
if self.fieldType == FieldType.FT_vertexBased:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
else:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
def getMartixForTensor(self, values):
"""
Reshape values to a list with 3x3 arrays. Usable for VTK export.
:param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...]
:return: List containing 3x3 matrices for each tensor
:rtype: list
"""
tensor = []
for i in values:
tensor.append(numpy.reshape(i, (3, 3)))
return tensor
def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL):
"""
Dump Field to a file using a Pickle serialization module.
:param str fileName: File name
:param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary
"""
pickle.dump(self, open(fileName, 'wb'), protocol)
def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None):
"""
Plots and/or saves 2D image using a matplotlib library. Works for structured and unstructured 2D/3D fields. 2D/3D fields need to define plane. This method gives only basic viewing options, for aesthetic and more elaborated output use e.g. VTK field export with
postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1
:param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz'
:param tuple elevation: range of third coordinate. For example, in plane='xy' is grabs z coordinates in the range
:param int numX: number of divisions on x graph axis
:param int numY: number of divisions on y graph axis
:param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic'
:param int fieldComponent: component of the field
:param bool vertex: if vertices shoud be plot as points
:param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal'
:param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing.
:param tuple barRange: min and max bar range. If barRange=('NaN','NaN'), it is adjusted automatically
:param str barFormatNum: format of color bar numbers
:param str title: title
:param str xlabel: x axis label
:param str ylabel: y axis label
:param str fileName: if nonempty, a filename is written to the disk, usually png, pdf, ps, eps and svg are supported
        :param bool show: if the plot should be shown
:param tuple figsize: size of canvas in inches. Affects only showing a figure. Image to a file adjust one side automatically.
        :param obj matPlotFig: existing figure handle returned by a previous call; if None, a new figure is created. Pass it back in to redraw into the same window.
:return: handle to matPlotFig
:rtype: matPlotFig
"""
try:
import numpy as np
import math
from scipy.interpolate import griddata
import matplotlib
matplotlib.use('TkAgg') # Qt4Agg gives an empty, black window
import matplotlib.pyplot as plt
except ImportError as e:
log.error('Skipping field2Image2D due to missing modules: %s' % e)
return None
# raise
if self.fieldType != FieldType.FT_vertexBased:
raise APIError.APIError('Only FieldType.FT_vertexBased is now supported')
mesh = self.getMesh()
numVertices = mesh.getNumberOfVertices()
indX = 0
indY = 0
elev = 0
if plane == 'xy':
indX = 0
indY = 1
elev = 2
elif plane == 'xz':
indX = 0
indY = 2
elev = 1
elif plane == 'yz':
indX = 1
indY = 2
elev = 0
# find eligible vertex points and values
vertexPoints = []
vertexValue = []
for i in range(0, numVertices):
coords = mesh.getVertex(i).getCoordinates()
# print(coords)
value = self.giveValue(i)[fieldComponent]
if elevation[1] > coords[elev] > elevation[0]:
vertexPoints.append((coords[indX], coords[indY]))
vertexValue.append(value)
if len(vertexPoints) == 0:
log.info('No valid vertex points found, putting zeros on domain 1 x 1')
for i in range(5):
vertexPoints.append((i % 2, i/4.))
vertexValue.append(0)
# for i in range (0, len(vertexPoints)):
# print (vertexPoints[i], vertexValue[i])
vertexPointsArr = np.array(vertexPoints)
vertexValueArr = np.array(vertexValue)
xMin = vertexPointsArr[:, 0].min()
xMax = vertexPointsArr[:, 0].max()
yMin = vertexPointsArr[:, 1].min()
yMax = vertexPointsArr[:, 1].max()
# print(xMin, xMax, yMin, yMax)
grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)]
grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp)
# print (grid_z1.T)
plt.ion() # ineractive mode
if matPlotFig is None:
matPlotFig = plt.figure(figsize=figsize)
# plt.xlim(xMin, xMax)
# plt.ylim(yMin, yMax)
plt.clf()
plt.axis((xMin, xMax, yMin, yMax))
image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal')
# plt.margins(tight=True)
# plt.tight_layout()
# plt.margins(x=-0.3, y=-0.3)
if colorBar:
cbar = plt.colorbar(orientation=colorBar, format=barFormatNum)
if colorBarLegend is not None:
if colorBarLegend == '':
colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent)
if self.unit is not None:
colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')'
cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90)
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if vertex == 1:
plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10)
# plt.axis('equal')
# plt.gca().set_aspect('equal', adjustable='box-forced')
if isinstance(barRange[0], float) or isinstance(barRange[0], int):
image.set_clim(vmin=barRange[0], vmax=barRange[1])
if fileName:
plt.savefig(fileName, bbox_inches='tight')
if show:
matPlotFig.canvas.draw()
# plt.ioff()
# plt.show(block=True)
return matPlotFig
def field2Image2DBlock(self):
"""
Block an open window from matPlotLib. Waits until closed.
"""
import matplotlib.pyplot as plt
plt.ioff()
plt.show(block=True)
def toHdf5(self, fileName, group='component1/part1'):
"""
Dump field to HDF5, in a simple format suitable for interoperability (TODO: document).
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be saved under.
The HDF hierarchy is like this::
group
|
+--- mesh_01 {hash=25aa0aa04457}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- mesh_02 {hash=17809e2b86ea}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- ...
+--- field_01
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_02
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_03
| +--- -> mesh_02
| \--- [cell_values]
\--- ...
where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data.
.. note:: This method has not been tested yet. The format is subject to future changes.
"""
import h5py
hdf = h5py.File(fileName, 'a', libver='latest')
if group not in hdf:
gg = hdf.create_group(group)
else:
gg = hdf[group]
# raise IOError('Path "%s" is already used in "%s".'%(path,fileName))
def lowestUnused(trsf, predicate, start=1):
"""
Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms
integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts
as argument. Lowest transformed value is returned.
"""
import itertools
for i in itertools.count(start=start):
t = trsf(i)
if not predicate(t):
return t
# save mesh (not saved if there already)
newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg)
mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp)
if self.value:
fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf))
fieldGrp['mesh'] = mh5
fieldGrp.attrs['fieldID'] = self.fieldID
fieldGrp.attrs['valueType'] = self.valueType
# string/bytes may not contain NULL when stored as string in HDF5
# see http://docs.h5py.org/en/2.3/strings.html
# that's why we cast to opaque type "void" and uncast using tostring before unpickling
fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit))
fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time))
# fieldGrp.attrs['time']=self.time.getValue()
if self.fieldType == FieldType.FT_vertexBased:
val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float)
for vert in range(self.getMesh().getNumberOfVertices()):
val[vert] = self.getVertexValue(vert).getValue()
fieldGrp['vertex_values'] = val
elif self.fieldType == FieldType.FT_cellBased:
# raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.")
val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float)
for cell in range(self.getMesh().getNumberOfCells()):
val[cell] = self.getCellValue(cell)
fieldGrp['cell_values'] = val
else:
raise RuntimeError("Unknown fieldType %d." % self.fieldType)
@staticmethod
def makeFromHdf5(fileName, group='component1/part1'):
"""
Restore Fields from HDF5 file.
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
.. note:: This method has not been tested yet.
"""
import h5py
hdf = h5py.File(fileName, 'r', libver='latest')
grp = hdf[group]
# load mesh and field data from HDF5
meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')]
fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')]
# construct all meshes as mupif objects
meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs]
# construct all fields as mupif objects
ret = []
for f in fieldObjs:
if 'vertex_values' in f:
fieldType, values = FieldType.FT_vertexBased, f['vertex_values']
elif 'cell_values' in f:
fieldType, values = FieldType.FT_cellBased, f['cell_values']
else:
                raise ValueError("HDF5/mupif format error: unable to determine field type.")
fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring()
if units == '':
units = None # special case, handled at saving time
else:
units = pickle.loads(units)
if time == '':
time = None # special case, handled at saving time
else:
time = pickle.loads(time)
meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to
ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType))
return ret
def toVTK2(self, fileName, format='ascii'):
"""
Save the instance as Unstructured Grid in VTK2 format (``.vtk``).
:param str fileName: where to save
:param str format: one of ``ascii`` or ``binary``
"""
self.field2VTKData().tofile(filename=fileName, format=format)
@staticmethod
def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']):
"""
Return fields stored in *fileName* in the VTK2 (``.vtk``) format.
:param str fileName: filename to load from
:param PhysicalUnit unit: physical unit of filed values
:param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered)
:param [string,] skip: file names to be skipped when reading the input file; the default value skips the default coolwarm colormap.
:returns: one field from VTK
:rtype: Field
"""
import pyvtk
from .dataID import FieldID
if not fileName.endswith('.vtk'):
log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).')
ret = []
try:
data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk)
except NotImplementedError:
log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName)
return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True)
ugr = data.structure
if not isinstance(ugr, pyvtk.UnstructuredGrid):
raise NotImplementedError(
"grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__)
mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr)
# get cell and point data
pd, cd = data.point_data.data, data.cell_data.data
for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for d in dd:
# will raise KeyError if fieldID with that name is not defined
if d.name in skip:
continue
fid = FieldID[d.name]
# determine the number of components using the expected number of values from the mesh
expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells())
nc = len(d.scalars)//expectedNumVal
valueType = ValueType.fromNumberOfComponents(nc)
                values = [d.scalars[i*nc:i*nc+nc] for i in range(expectedNumVal)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=unit, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def toVTK3(self, fileName, **kw):
"""
Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly.
:param fileName: output file name
:param ``**kw``: passed to :obj:`manyToVTK3`
"""
return self.manyToVTK3([self], fileName, **kw)
@staticmethod
def manyToVTK3(fields, fileName, ascii=False, compress=True):
"""
Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``).
All *fields* must be defined on the same mesh object; exception will be raised if this is not the case.
:param list of Field fields:
:param fileName: output file name
        :param bool ascii: write numbers as ASCII in the XML-based VTU file (rather than base64-encoded binary in XML)
:param bool compress: apply compression to the data
"""
import vtk
if not fields:
raise ValueError('At least one field must be passed.')
# check if all fields are defined on the same mesh
if len(set([f.mesh for f in fields])) != 1:
raise RuntimeError(
                'Not all fields share the same Mesh object, so they cannot be saved to a single .vtu file.')
# convert mesh to VTK UnstructuredGrid
mesh = fields[0].getMesh()
vtkgrid = mesh.asVtkUnstructuredGrid()
# add fields as arrays
for f in fields:
arr = vtk.vtkDoubleArray()
arr.SetNumberOfComponents(f.getRecordSize())
arr.SetName(f.getFieldIDName())
assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled
if f.getFieldType() == FieldType.FT_vertexBased:
nn = mesh.getNumberOfVertices()
else:
nn = mesh.getNumberOfCells()
            arr.SetNumberOfTuples(nn)
for i in range(nn):
arr.SetTuple(i, f.giveValue(i))
if f.getFieldType() == FieldType.FT_vertexBased:
vtkgrid.GetPointData().AddArray(arr)
else:
vtkgrid.GetCellData().AddArray(arr)
# write the unstructured grid to file
writer = vtk.vtkXMLUnstructuredGridWriter()
if compress:
writer.SetCompressor(vtk.vtkZLibDataCompressor())
if ascii:
writer.SetDataModeToAscii()
writer.SetFileName(fileName)
# change between VTK5 and VTK6
        if vtk.vtkVersion().GetVTKMajorVersion() >= 6:
            writer.SetInputData(vtkgrid)
        else:
            writer.SetInput(vtkgrid)
writer.Write()
# finito
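    # A minimal sketch for manyToVTK3 (illustrative only: f1 and f2 stand for Field
    # instances defined on the same mesh; the output names are placeholders):
    #
    #     Field.manyToVTK3([f1, f2], 'results.vtu', ascii=False, compress=True)
    #     f1.toVTK3('single.vtu')  # proxy for manyToVTK3 with a single field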
@staticmethod
def makeFromVTK3(fileName, units, time=0, forceVersion2=False):
"""
        Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or legacy ``.vtk`` with *forceVersion2*); the mesh is shared between fields.
``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not.
        .. note:: Physical units are not stored in VTK files; all created fields are assigned the *units* passed to this function.
:param str fileName: VTK (``*.vtu``) file
:param PhysicalUnit units: units of read values
:param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered)
        :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, instead of ``vtk.vtkXMLGenericDataObjectReader``; this also supposes *fileName* ends with ``.vtk`` (not checked, but may cause an error).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
"""
import vtk
from .dataID import FieldID
# rr=vtk.vtkXMLUnstructuredGridReader()
if forceVersion2 or fileName.endswith('.vtk'):
rr = vtk.vtkGenericDataObjectReader()
else:
rr = vtk.vtkXMLGenericDataObjectReader()
rr.SetFileName(fileName)
rr.Update()
ugrid = rr.GetOutput()
if not isinstance(ugrid, vtk.vtkUnstructuredGrid):
raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % (
fileName, ugrid.__class__.__name__))
# import sys
# sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid)))
# make mesh -- implemented separately
mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid)
# fields which will be returned
ret = []
# get cell and point data
cd, pd = ugrid.GetCellData(), ugrid.GetPointData()
for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for idata in range(data.GetNumberOfArrays()):
                aname, arr = data.GetArrayName(idata), data.GetArray(idata)
nt = arr.GetNumberOfTuples()
if nt == 0:
raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname)
                t0 = arr.GetTuple(0)
                valueType = ValueType.fromNumberOfComponents(len(t0))
# this will raise KeyError if fieldID with that name not defined
fid = FieldID[aname]
# get actual values as tuples
values = [arr.GetTuple(t) for t in range(nt)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=units, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
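    # A minimal sketch for makeFromVTK3 (illustrative only: file names and U, assumed to be
    # a PhysicalUnit instance, are placeholders; array names must match FieldID members):
    #
    #     fields = Field.makeFromVTK3('results.vtu', units=U, time=0.)
    #     legacy = Field.makeFromVTK3('legacy.vtk', units=U, time=0., forceVersion2=True)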
def _sum(self, other, sign1, sign2):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to modify the field values.
"""
raise TypeError('Not supported')
def inUnitsOf(self, *units):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to use convertToUnits method
performing in place conversion.
"""
raise TypeError('Not supported')
    # def __deepcopy__(self, memo):
    # """ Deepcopy operation modified not to include attributes starting with underscore.
    # These are supposed to be the ones valid only to a specific copy of the receiver.
# An example of these attributes are _PyroURI (injected by Application),
# where _PyroURI contains the URI of specific object, the copy should receive
# its own URI
# """
# cls = self.__class__
# dpcpy = cls.__new__(cls)
#
# memo[id(self)] = dpcpy
# for attr in dir(self):
# if not attr.startswith('_'):
# value = getattr(self, attr)
# setattr(dpcpy, attr, copy.deepcopy(value, memo))
# return dpcpy
| lgpl-3.0 |
felipessalvatore/CNNexample | src/tunning/fc.py | 1 | 2217 | import os
import sys
from random import randint
import numpy as np
import inspect
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import run_test, get_data_4d, get_time
from CNN import CNNModel, train_model, check_valid
from DataHolder import DataHolder
from Config import Config
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()
my_dataholder = DataHolder(train_dataset,
train_labels,
valid_dataset,
valid_labels,
test_dataset,
test_labels)
FC = [5, 10, 15, 20, 30, 40, 60, 200]
number_of_exp = len(FC)
results = []
duration = []
info = []
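# Sweep over the candidate layer sizes: for each fc, build a Config whose three fully
# connected layers scale as 3*fc, 2*fc and fc, train the CNN, and record the validation
# accuracy together with the training duration reported by get_time.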
for i, fc in enumerate(FC):
print("\n ({0} of {1})".format(i + 1, number_of_exp))
my_config = Config(tunning=True, hidden_nodes_1=3 * fc,
hidden_nodes_2=2 * fc,
hidden_nodes_3=fc)
attrs = vars(my_config)
config_info = ["%s: %s" % item for item in attrs.items()]
info.append(config_info)
my_model = CNNModel(my_config, my_dataholder)
train_model(my_model, my_dataholder, 10001, 1000, False)
current_dur = get_time(train_model, 10001)
score = check_valid(my_model)
results.append(score)
duration.append(current_dur)
best_result = max(list(zip(results, FC, duration, info)))
result_string = """In an experiment with {0} fully connected sizes
the best one is {1} with valid accuracy = {2}.
\nThe training takes {3:.2f} seconds using the following params:
\n{4}""".format(number_of_exp,
best_result[1],
best_result[0],
best_result[2],
best_result[3])
with open("final.txt", "w") as result_file:
    result_file.write(result_string)
plt.plot(FC, results)
plt.xlabel("hidden_nodes_3")
plt.ylabel("valid acc")
plt.savefig("fc.png")
plt.clf()
plt.plot(FC, duration)
plt.xlabel("hidden_nodes_3")
plt.ylabel("duration (s)")
plt.savefig("fc_du.png")
plt.clf()
| mit |
MichaelAquilina/numpy | numpy/lib/npyio.py | 42 | 71218 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
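    # Always enable ZIP64 extensions so that .npz archives with members larger than
    # 2 GiB can still be written and read.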
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
                raise ValueError("allow_pickle=False, but the file appears to "
                                 "contain pickled object data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
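# A short illustrative sketch of the converters _getconv hands back (hedged examples,
# not exhaustive):
#
#     _getconv(np.dtype('i4'))(b'3.0')      # -> 3      (integers go through float first)
#     _getconv(np.dtype(float))(b'0x1p-1')  # -> 0.5    (hexadecimal floats are accepted)
#     _getconv(np.dtype('S5'))(b'abc')      # -> b'abc' (bytes are passed through)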
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
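    # A short illustrative note on the two helpers above (hedged example): for a dtype such
    # as np.dtype([('x', float), ('pos', float, (2,))]), flatten_dtype yields three float
    # base dtypes plus packing info, and pack_items([1.0, 2.0, 3.0], packing) regroups the
    # converted row into (1.0, [2.0, 3.0]) so it matches the structured dtype.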
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
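# (added illustrative note, not part of the original module) The four wrappers
# above only change the defaults they pass to `genfromtxt`.  For example,
# `recfromcsv` reads a comma-separated file with a header row into a record
# array whose field names are lower-cased by the ``case_sensitive="lower"``
# default, e.g.::
#
#     from io import BytesIO
#     r = recfromcsv(BytesIO(b"Name,Value\na,1\nb,2"))
#     r["value"]   # -> array([1, 2])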
| bsd-3-clause |
Kongsea/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in HDF5 format as a
  # simple demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
rohanp/scikit-learn | sklearn/model_selection/tests/test_validation.py | 20 | 27961 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from test_split import MockClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y)
def test_cross_val_score_predict_labels():
# Check if ValueError (when labels is None) propagates to cross_val_score
# and cross_val_predict
# And also check if labels is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_label, _, pvalue_label = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, labels=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
p = np.arange(100)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
RuthAngus/LSST-max | code/GP_periodogram.py | 1 | 1066 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from GProtation import make_plot, lnprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo
def GP_periodogram(x, y, yerr, p_init, plims, N):
"""
	This function takes a light curve and attempts to produce a GP periodogram.
It returns the value of the highest peak.
The kernel hyperparameters are optimised over a grid of periods.
This is also a "profile likelihood".
x, y, yerr: the light curve.
p_init: the initial guess for the period.
plims: the (log) boundaries for the grid.
N: the number of grid points.
"""
# create the grid
	periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), N)
# initial hyperparameters
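	# --- hedged sketch (added; not in the original file) --------------------
	# The code below illustrates the grid/profile-likelihood idea described in
	# the docstring: at each trial period the remaining hyperparameters are
	# optimised and the maximised log-likelihood is recorded.  It assumes the
	# george 0.2-style kernel signatures imported above; theta_init is a
	# hypothetical starting guess for the non-period hyperparameters.
	theta_init = np.log([1., 1., 1.])
	def nll(theta, period):
		# negative log-likelihood of a quasi-periodic GP at a fixed period
		amp, scale, gamma = np.exp(theta)
		kernel = amp * ExpSquaredKernel(scale) * ExpSine2Kernel(gamma, period)
		gp = george.GP(kernel)
		try:
			gp.compute(x, yerr)
			return -gp.lnlikelihood(y)
		except (ValueError, np.linalg.LinAlgError):
			return 1e25
	profile_likelihood = []
	for p in periods:
		result = spo.minimize(nll, theta_init, args=(p,), method="Nelder-Mead")
		profile_likelihood.append(-result.fun)
	# return the period of the highest peak in the profile likelihood
	return periods[np.argmax(profile_likelihood)]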
if __name__ == "__main__":
# fake data
	x = np.linspace(0, 10, 100)
p = 2
err = .1
y = np.sin(2*np.pi*(1./p)*x) + np.random.randn(100)*err
yerr = np.ones_like(y) * err
	p_init, plims = 2, np.log([.1, 5])
GP_periodogram(x, y, yerr, p_init, plims, 10)
| mit |
dudulianangang/vps | EneConsTest.py | 1 | 5969 | import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
plt.style.use('seaborn-white')
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'Tahoma'
# # plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 16
# plt.rcParams['axes.labelsize'] = 10
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['xtick.labelsize'] = 8
# plt.rcParams['ytick.labelsize'] = 8
# plt.rcParams['legend.fontsize'] = 10
# plt.rcParams['figure.titlesize'] = 12
# constants for normalization
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
tt = 1/wp
ts = 50*5
te = 1500
pct = 100
en0 = me*c**2
en1 = 0.5*ep*ld**2
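# (added note) wp is the electron plasma frequency and ld = c/wp the electron
# inertial length used as the length unit; e0 and b0 are the normalizing
# electric and magnetic field amplitudes, en0 = me*c**2 the electron rest
# energy, and en1 = 0.5*ep*ld**2 the factor used below to convert summed
# squared field values into field energies.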
# simulation domain
nx = 3500
ny = 3500
lx = 3500
ly = 3500
# figure domain (set by grid)
grid_min_x = 0
grid_max_x = nx
grid_min_y = 0
grid_max_y = ny
Gx = np.linspace(0,lx,nx)
Gy = np.linspace(0,ly,ny)
gx = Gx[grid_min_x:grid_max_x+1]
gy = Gy[grid_min_y:grid_max_y+1]
# figure parameters
# fs = 24
jetcmap = plt.cm.get_cmap("rainbow", 9) #generate a jet map with 10 values
jet_vals = jetcmap(np.arange(9)) #extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1] #change the first value
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)
# define array
EneBmE = np.ones(7)
EneBmI = np.ones(7)
EneBgE = np.ones(7)
EneBgI = np.ones(7)
sex = np.ones(7)
sey = np.ones(7)
sez = np.ones(7)
sbx = np.ones(7)
sby = np.ones(7)
sbz = np.ones(7)
TpeC1 = np.ones(7)
TpeS1 = np.ones(7)
TfeC1 = np.ones(7)
TfeS1 = np.ones(7)
TpeC2 = np.ones(7)
TpeS2 = np.ones(7)
TfeC2 = np.ones(7)
TfeS2 = np.ones(7)
TeC1 = np.ones(7)
TeS1 = np.ones(7)
TeC2 = np.ones(7)
TeS2 = np.ones(7)
time = np.ones(7)
# plot function
file = '/Volumes/yaowp2016/'
folder = 'nj'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC1[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC1[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
folder = 'nj_non'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC2[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC2[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS2[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
TeC1 = TpeC1+TfeC1
TeS1 = TpeS1+TfeS1
TeC2 = TpeC2+TfeC2
TeS2 = TpeS2+TfeS2
np.save('tpec1.npy', TpeC1)
np.save('tpes1.npy', TpeS1)
np.save('tfec1.npy', TfeC1)
np.save('tfes1.npy', TfeS1)
np.save('tpec2.npy', TpeC2)
np.save('tpes2.npy', TpeS2)
np.save('tfec2.npy', TfeC2)
np.save('tfes2.npy', TfeS2)
np.save('tec1.npy', TeC1)
np.save('tes1.npy', TeS1)
np.save('tec2.npy', TeC2)
np.save('tes2.npy', TeS2)
# plt.figure(figsize=(8,5))
# ax = plt.subplot()
# ax.plot(time, TpeC1,'r-', lw=2, label='tbc-cal')
# ax.plot(time, TpeS1,'r--', lw=2, label='tbc-sys')
# ax.plot(time, TpeC2,'b-', lw=2, label='pbc-cal')
# ax.plot(time, TpeS2,'b--', lw=2, label='pbc-sys')
# plt.xlabel('time($\omega_{pe}^{-1}$)',fontsize=24)
# plt.ylabel('energy($J$)',fontsize=24)
# plt.legend(loc='best', numpoints=1, fancybox=True)
# plt.title('total system energy',fontsize=32,fontstyle='normal')
# plt.show()
# plt.savefig(file+folder+'/plots/'+'TotalEnergyComp.png',bbox_inches='tight') # n means normalized
# plt.close()
| apache-2.0 |
cbertinato/pandas | pandas/tests/indexes/timedeltas/test_scalar_compat.py | 1 | 2391 | """
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(ser.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_tdi_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00')])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq='foo')
with pytest.raises(ValueError, match=msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq='M')
with pytest.raises(ValueError, match=msg):
elt.round(freq='M')
| bsd-3-clause |
colin2328/asciiclass | lectures/lec6/match-loop.py | 3 | 2094 | import csv
from sklearn import tree
import editdist
import re
def string_match_score(p1,p2,field):
s1 = p1[field]
s2 = p2[field]
return editdist.distance(s1.lower(),s2.lower())/float(len(s1))
def jaccard_score(p1,p2,field):
name1 = p1[field]
name2 = p2[field]
set1 = set(name1.lower().split())
set2 = set(name2.lower().split())
c = set1.intersection(set2)
return float(len(c)) / (len(set1) + len(set2) - len(c))
def price_score(p1,p2,field):
price1 = p1[field]
if (len(price1) == 0): return 10000
price2 = p2[field]
if (len(price2) == 0): return 10000
price1 = re.sub('[\$,]', '', price1)
price2 = re.sub('[\$,]', '', price2)
price1 = float(price1)
price2 = float(price2)
return abs(price1 - price2)
print "Loading Data"
abtReader = csv.DictReader(open("Abt.csv","rU"))
buyReader = csv.DictReader(open("Buy.csv","rU"))
gtLines = csv.DictReader(open("abt_buy_perfectMapping.csv","rU"))
gtBuyMap = {}
gtAbtMap = {}
abtAr = []
buyAr = []
for r in abtReader:
abtAr.append(r)
for r in buyReader:
buyAr.append(r)
for r in gtLines:
gtAbtMap[r["idAbt"]] = r["idBuy"]
gtBuyMap[r["idBuy"]] = r["idAbt"]
for loop in range(0,10,1):
falsePos = 0
truePos = 0
falseNeg = 0
trueNeg = 0
thresh = float(loop)/10.0
for r1 in buyAr:
bestMatch = 0
bestVal = []
j = 0
for r2 in abtAr:
s = jaccard_score(r1,r2,"name")
if (s > bestMatch):
bestMatch = s
bestVal = r2
if (bestMatch > thresh):
# print "Best match: ",r1["name"],bestVal["name"],"score=",bestMatch
if (gtBuyMap[r1["id"]] == bestVal["id"]):
truePos = truePos + 1
else:
falsePos = falsePos + 1
precision = truePos / float(truePos + falsePos)
recall = truePos / float(len(buyAr))
fmeas = (2.0 * precision * recall) / (precision + recall)
print "THRESH = ",thresh,"TP = ",truePos,"FP = ",falsePos,"PREC = ",precision,"RECALL = ",recall,"F = ",fmeas
| mit |
SophieIPP/ipp-macro-series-parser | ipp_macro_series_parser/demographie/parser.py | 1 | 3235 | # -*- coding: utf-8 -*-
# TAXIPP -- A French microsimulation model
# By: IPP <taxipp@ipp.eu>
#
# Copyright (C) 2012, 2013, 2014, 2015 IPP
# https://github.com/taxipp
#
# This file is part of TAXIPP.
#
# TAXIPP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TAXIPP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import pandas
import pkg_resources
from ipp_macro_series_parser.config import Config
config_parser = Config(
config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location)
)
xls_directory = os.path.join(config_parser.get('data', 'demographie_directory'), 'xls')
log = logging.getLogger(__name__)
def create_demographie_data_frame():
data_frame = pandas.DataFrame()
for year in range(1999, 2015 + 1):
file_path = os.path.join(xls_directory, u'pyramide-des-ages-{}.xls'.format(year))
skiprows = 5 - (year == 1999)
parse_cols = "A:E"
slice_start = 0
slice_end = 101
sheetname = 'France'
if year <= 2010:
sheetnames = ['France', u'France métropolitaine']
elif year == 2011:
sheetnames = ['{} France'.format(year), u"{} métropole".format(year)]
else:
sheetnames = ['Pyramide {} France'.format(year), u'Pyramide {} métropole'.format(year)]
for sheetname in sheetnames:
try:
df = pandas.read_excel(
file_path,
# na_values = '-',
sheetname = sheetname,
skiprows = skiprows,
parse_cols = parse_cols).iloc[slice_start:slice_end]
df['year'] = year
if sheetname in ['France', u'France métropolitaine']:
df['champ'] = sheetname
else:
df['champ'] = u'France métropolitaine' if u'métropole' in sheetname else 'France'
# All column name on one line
remove_cr = dict(
(column, column.replace(u"\n", " ").replace(" ", " ")) for column in df.columns)
df.rename(columns = remove_cr, inplace = True)
# Femmes _> Nombre de femmes etc
df.rename(columns = dict(
Femmes = "Nombre de femmes",
Hommes = "Nombre d'hommes"), inplace = True)
data_frame = pandas.concat((data_frame, df))
del df
except Exception, e:
print year
print sheetname
raise(e)
return pandas.melt(data_frame, id_vars = ['year', 'champ', u'Âge révolu', u'Année de naissance'])
| gpl-3.0 |
huobaowangxi/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
PTDreamer/dRonin | python/ins/cins.py | 11 | 3838 |
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import ins
# this is the set of (currently) recommend INS settings. modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var=numpy.array([1e-3,1e-2,10])
class CINS:
GRAV = 9.805
def __init__(self):
""" Creates the CINS class.
Important variables are
* X - the vector of state variables
* Xd - the vector of state derivatives for state and inputs
* Y - the vector of outputs for current state value
"""
self.state = []
def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
""" configure the INS parameters """
if mag_var is not None:
ins.configure(mag_var=mag_var)
if gyro_var is not None:
ins.configure(gyro_var=gyro_var)
if accel_var is not None:
ins.configure(accel_var=accel_var)
if baro_var is not None:
ins.configure(baro_var=baro_var)
if gps_var is not None:
ins.configure(gps_var=gps_var)
def prepare(self):
""" prepare the C INS wrapper
"""
self.state = ins.init()
self.configure(
mag_var=default_mag_var,
gyro_var=default_gyro_var,
accel_var=default_accel_var,
baro_var=default_baro_var,
gps_var=default_gps_var
)
def predict(self, gyros, accels, dT = 1.0/666.0):
""" Perform the prediction step
"""
self.state = ins.prediction(gyros, accels, dT)
def correction(self, pos=None, vel=None, mag=None, baro=None):
""" Perform the INS correction based on the provided corrections
"""
sensors = 0
Z = numpy.zeros((10,),numpy.float64)
# the masks must match the values in insgps.h
if pos is not None:
sensors = sensors | 0x0003
Z[0] = pos[0]
Z[1] = pos[1]
if vel is not None:
sensors = sensors | 0x0038
Z[3] = vel[0]
Z[4] = vel[1]
Z[5] = vel[2]
if mag is not None:
sensors = sensors | 0x01C0
Z[6] = mag[0]
Z[7] = mag[1]
Z[8] = mag[2]
if baro is not None:
sensors = sensors | 0x0200
Z[9] = baro
self.state = ins.correction(Z, sensors)
def test():
""" test the INS with simulated data
"""
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
    sim = CINS()
sim.prepare()
dT = 1.0 / 666.0
STEPS = 100000
history = numpy.zeros((STEPS,16))
history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
ROLL = 0.1
YAW = 0.2
        sim.predict([0, 0, YAW], [0, CINS.GRAV*sin(ROLL), -CINS.GRAV*cos(ROLL) - 0.0], dT=dT)
history[k,:] = sim.state
history_rpy[k,:] = quat_rpy(sim.state[6:10])
times[k] = k * dT
angle = 0*numpy.pi/3 + YAW * dT * k # radians
height = 1.0 * k * dT
if True and k % 60 == 59:
sim.correction(pos=[[10],[5],[-height]])
if True and k % 60 == 59:
sim.correction(vel=[[0],[0],[-1]])
if k % 20 == 8:
sim.correction(baro=[height])
if True and k % 20 == 15:
sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])
if k % 1000 == 0:
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],history[0:k:4,0:3])
ax[0][0].set_title('Position')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.draw()
fig.show()
plt.show()
if __name__ =='__main__':
test() | gpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-sncosmo/package.py | 5 | 1133 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', sha256='f3969eec5b25f60c70418dbd64765a2b4735bb53c210c61d0aab68916daea588')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
| lgpl-2.1 |
DeepVisionTeam/TensorFlowBook | Titanic/data_processing.py | 2 | 4807 | import os
import re
import pandas as pd
import tensorflow as tf
pjoin = os.path.join
DATA_DIR = pjoin(os.path.dirname(__file__), 'data')
train_data = pd.read_csv(pjoin(DATA_DIR, 'train.csv'))
test_data = pd.read_csv(pjoin(DATA_DIR, 'test.csv'))
# Translation:
# Don: an honorific title used in Spain, Portugal, Italy
# Dona: Feminine form for don
# Mme: Madame, Mrs
# Mlle: Mademoiselle, Miss
# Jonkheer (female equivalent: Jonkvrouw) is a Dutch honorific of nobility
HONORABLE_TITLES = ['sir', 'lady', 'don', 'dona', 'countess', 'jonkheer',
'major', 'col', 'dr', 'master', 'capt']
NORMAL_TITLES = ['mr', 'ms', 'mrs', 'miss', 'mme', 'mlle', 'rev']
TITLES = HONORABLE_TITLES + NORMAL_TITLES
def get_title(name):
title_search = re.search('([A-Za-z]+)\.', name)
return title_search.group(1).lower()
def get_family(row):
last_name = row['Name'].split(",")[0]
if last_name:
family_size = 1 + row['Parch'] + row['SibSp']
if family_size > 3:
return "{0}_{1}".format(last_name.lower(), family_size)
else:
return "nofamily"
else:
return "unknown"
def get_deck(cabin):
if pd.isnull(cabin):
return 'U'
return cabin[:1]
class TitanicDigest(object):
def __init__(self, dataset):
self.count_by_sex = dataset.groupby('Sex')['PassengerId'].count()
self.mean_age = dataset['Age'].mean()
self.mean_age_by_sex = dataset.groupby("Sex")["Age"].mean()
self.mean_fare_by_class = dataset.groupby("Pclass")["Fare"].mean()
self.titles = TITLES
self.families = dataset.apply(get_family, axis=1).unique().tolist()
self.decks = dataset["Cabin"].apply(get_deck).unique().tolist()
self.embarkments = dataset.Embarked.unique().tolist()
self.embark_mode = dataset.Embarked.dropna().mode().values
def preprocess(data, digest):
# convert ['male', 'female'] values of Sex to [1, 0]
data['Sex'] = data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
# fill empty age field with mean age
data['Age'] = data['Age'].apply(
lambda age: digest.mean_age if pd.isnull(age) else age)
# is child flag
data['Child'] = data['Age'].apply(lambda age: 1 if age <= 15 else 0)
# fill fare with mean fare of the class
def get_fare_value(row):
if pd.isnull(row['Fare']):
return digest.mean_fare_by_class[row['Pclass']]
else:
return row['Fare']
data['Fare'] = data.apply(get_fare_value, axis=1)
# fill Embarked with mode
data['Embarked'] = data['Embarked'].apply(
lambda e: digest.embark_mode if pd.isnull(e) else e)
data["EmbarkedF"] = data["Embarked"].apply(digest.embarkments.index)
    # fill missing Cabin values with the placeholder 'U0'
data['Cabin'] = data['Cabin'].apply(lambda c: 'U0' if pd.isnull(c) else c)
# Deck
data["Deck"] = data["Cabin"].apply(lambda cabin: cabin[0])
data["DeckF"] = data['Deck'].apply(digest.decks.index)
data['Title'] = data['Name'].apply(get_title)
data['TitleF'] = data['Title'].apply(digest.titles.index)
data['Honor'] = data['Title'].apply(
lambda title: int(title in HONORABLE_TITLES))
data['Family'] = data.apply(get_family, axis=1)
if 'Survived' in data.keys():
data['Deceased'] = data['Survived'].apply(lambda s: int(not s))
return data
digest = TitanicDigest(train_data)
def get_train_data():
return preprocess(train_data, digest)
def get_test_data():
return preprocess(test_data, digest)
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def transform_to_tfrecord():
data = pd.read_csv(pjoin(DATA_DIR, 'train.csv'))
filepath = pjoin(DATA_DIR, 'data.tfrecords')
writer = tf.python_io.TFRecordWriter(filepath)
for i in range(len(data)):
feature = {}
for key in data.keys():
value = data[key][i]
if isinstance(value, int):
value = tf.train.Feature(
int64_list=tf.train.Int64List(value=[value]))
elif isinstance(value, float):
value = tf.train.Feature(
float_list=tf.train.FloatList(value=[value])
)
elif isinstance(value, str):
value = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[value.encode(encoding="utf-8")])
)
feature[key] = value
example = tf.train.Example(
features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
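# Illustrative sketch only (not part of the original module): reading back the
# file written by transform_to_tfrecord, using the same TF1-style API as above.
# The feature keys mirror the CSV column names; treat this as an assumption-laden
# example rather than a tested utility.
def read_first_tfrecord(filepath=pjoin(DATA_DIR, 'data.tfrecords')):
    # Iterate over the serialized records and parse the first one back into an
    # Example proto, e.g. example.features.feature['Age'].float_list.value.
    for record in tf.python_io.tf_record_iterator(filepath):
        example = tf.train.Example()
        example.ParseFromString(record)
        return example
    return None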
if __name__ == '__main__':
transform_to_tfrecord()
| apache-2.0 |
licco/zipline | zipline/history/history_container.py | 1 | 18509 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import groupby
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import (
index_at_dt,
)
from zipline.utils.data import RollingPanel
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(field,
buffer_frame,
digest_frame,
pre_digest_values):
"""
Forward-fill a buffer frame, falling back to the end-of-period values of a
digest frame if the buffer frame has leading NaNs.
"""
# Get values which are NaN at the beginning of the period.
first_bar = buffer_frame.iloc[0]
def iter_nan_sids():
"""
Helper for iterating over the remaining nan sids in first_bar.
"""
return (sid for sid in first_bar[first_bar.isnull()].index)
# Try to fill with the last entry from the digest frame.
if digest_frame is not None:
# We don't store a digest frame for frequencies that only have a bar
# count of 1.
for sid in iter_nan_sids():
buffer_frame[sid][0] = digest_frame.ix[-1, sid]
# If we still have nan sids, try to fill with pre_digest_values.
for sid in iter_nan_sids():
prior_sid_value = pre_digest_values[field].get(sid)
if prior_sid_value:
# If the prior value is greater than the timestamp of our first
# bar.
if prior_sid_value.get('dt', first_bar.name) > first_bar.name:
buffer_frame[sid][0] = prior_sid_value.get('value', np.nan)
return buffer_frame.ffill()
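# Minimal illustrative sketch (not part of the original module): the same
# "seed leading NaNs from an older frame, then forward-fill" idea shown on
# plain pandas objects with made-up sids and prices.
def _example_ffill_with_fallback():
    buffer_frame = pd.DataFrame({'AAPL': [np.nan, 101.0], 'MSFT': [30.0, np.nan]})
    digest_frame = pd.DataFrame({'AAPL': [100.0], 'MSFT': [29.0]})
    first_bar = buffer_frame.iloc[0]
    for sid in first_bar[first_bar.isnull()].index:
        # Seed the leading NaN from the last digest bar for that sid.
        buffer_frame.loc[buffer_frame.index[0], sid] = digest_frame[sid].iloc[-1]
    # Any remaining gaps are closed by an ordinary forward-fill.
    return buffer_frame.ffill()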
def ffill_digest_frame_from_prior_values(field, digest_frame, prior_values):
"""
    Forward-fill a digest frame, falling back to the last known prior values if
necessary.
"""
if digest_frame is not None:
# Digest frame is None in the case that we only have length 1 history
# specs for a given frequency.
# It's possible that the first bar in our digest frame is storing NaN
# values. If so, check if we've tracked an older value and use that as
# an ffill value for the first bar.
first_bar = digest_frame.ix[0]
nan_sids = first_bar[first_bar.isnull()].index
for sid in nan_sids:
try:
# Only use prior value if it is before the index,
# so that a backfill does not accidentally occur.
if prior_values[field][sid]['dt'] <= digest_frame.index[0]:
digest_frame[sid][0] = prior_values[field][sid]['value']
except KeyError:
# Allow case where there is no previous value.
# e.g. with leading nans.
pass
digest_frame = digest_frame.ffill()
return digest_frame
def freq_str_and_bar_count(history_spec):
"""
Helper for getting the frequency string and bar count from a history spec.
"""
return (history_spec.frequency.freq_str, history_spec.bar_count)
def group_by_frequency(history_specs):
"""
Takes an iterable of history specs and returns a dictionary mapping unique
frequencies to a list of specs with that frequency.
Within each list, the HistorySpecs are sorted by ascending bar count.
Example:
[HistorySpec(3, '1d', 'price', True),
HistorySpec(2, '2d', 'open', True),
HistorySpec(2, '1d', 'open', False),
HistorySpec(5, '1m', 'open', True)]
yields
{Frequency('1d') : [HistorySpec(2, '1d', 'open', False)],
HistorySpec(3, '1d', 'price', True),
Frequency('2d') : [HistorySpec(2, '2d', 'open', True)],
Frequency('1m') : [HistorySpec(5, '1m', 'open', True)]}
"""
return {key: list(group)
for key, group in groupby(
sorted(history_specs, key=freq_str_and_bar_count),
key=lambda spec: spec.frequency)}
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
def __init__(self, history_specs, initial_sids, initial_dt):
# History specs to be served by this container.
self.history_specs = history_specs
self.frequency_groups = \
group_by_frequency(itervalues(self.history_specs))
# The set of fields specified by all history specs
self.fields = set(spec.field for spec in itervalues(history_specs))
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(
initial_sids,
initial_dt,
)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
# Populating initial frames here, so that the cost of creating the
# initial frames does not show up when profiling. These frames are
# cached since mid-stream creation of containing data frames on every
# bar is expensive.
self.create_return_frames(initial_dt)
# Helps prop up the prior day panel against having a nan, when the data
# has been seen.
self.last_known_prior_values = {field: {} for field in self.fields}
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.frequency_groups)
def create_digest_panels(self, initial_sids, initial_dt):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
Relies on the fact that group_by_frequency sorts the value lists by
ascending bar count.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, specs in iteritems(self.frequency_groups):
# Relying on the sorting of group_by_frequency to get the spec
# requiring the largest number of bars.
largest_spec = specs[-1]
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.window_open(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
initial_dates = index_at_dt(largest_spec, initial_dt)
# Set up dates for our first digest roll, which is keyed to the
# close of the first entry in our initial index.
first_window_closes[freq] = initial_dates[0]
first_window_starts[freq] = freq.window_open(initial_dates[0])
rp = RollingPanel(len(initial_dates) - 1,
self.fields,
initial_sids)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_sids, initial_dt):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(freq.max_minutes
for freq in self.unique_frequencies)
rp = RollingPanel(
max_bars_needed,
self.fields,
initial_sids,
# Restrict the initial data down to just the fields being used in
# this container.
)
return rp
def convert_columns(self, values):
"""
If columns have a specific type you want to enforce, overwrite this
method and return the transformed values.
"""
return values
def create_return_frames(self, algo_dt):
"""
Populates the return frame cache.
Called during init and at universe rollovers.
"""
self.return_frames = {}
for spec_key, history_spec in iteritems(self.history_specs):
index = pd.to_datetime(index_at_dt(history_spec, algo_dt))
frame = pd.DataFrame(
index=index,
columns=self.convert_columns(
self.buffer_panel.minor_axis.values),
dtype=np.float64)
self.return_frames[spec_key] = frame
def buffer_panel_minutes(self,
buffer_panel=None,
earliest_minute=None,
latest_minute=None):
"""
Get the minutes in @buffer_panel between @earliest_minute and
        @latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object. If no panel is supplied, we use self.buffer_panel.
        If no value is specified for @earliest_minute, use all the minutes we
        have up until @latest_minute.
        If no value is specified for @latest_minute, use all the minutes we
        have through the most recent one available.
"""
buffer_panel = buffer_panel or self.buffer_panel
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current()
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def update(self, data, algo_dt):
"""
        Takes @data, the bar for @algo_dt, checks whether any new digests need
        to be rolled, then adds the new data to the buffer panel.
"""
self.update_digest_panels(algo_dt, self.buffer_panel)
fields = self.fields
frame = pd.DataFrame(
{sid: {field: bar[field] for field in fields}
for sid, bar in data.iteritems()
if (bar
and
bar['dt'] == algo_dt
and
# Only use data which is keyed in the data panel.
# Prevents crashes due to custom data.
sid in self.buffer_panel.minor_axis)})
self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
        Check whether @algo_dt is greater than cur_window_closes for any of our
        frequencies. If so, roll a digest for that frequency using data drawn
        from @buffer_panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
"""
for frequency in self.unique_frequencies:
if freq_filter is not None and not freq_filter(frequency):
continue
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
)
# Create a digest from minutes_to_process and add it to
# digest_panel.
self.roll(frequency,
digest_panel,
minutes_to_process,
latest_minute)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
def roll(self, frequency, digest_panel, buffer_minutes, digest_dt):
"""
        Package up the minutes in @buffer_minutes into a single digest bar and
        insert it into @digest_panel at index @digest_dt. The caller
        (update_digest_panels) updates self.cur_window_{starts|closes} for the
        given frequency.
"""
if digest_panel is None:
# This happens if the only spec we have at this frequency has a bar
# count of 1.
return
rolled = pd.DataFrame(
index=self.fields,
columns=buffer_minutes.minor_axis)
for field in self.fields:
if field in CLOSING_PRICE_FIELDS:
# Use the last close, or NaN if we have no minutes.
try:
prices = buffer_minutes.loc[field].ffill().iloc[-1]
except IndexError:
# Scalar assignment sets the value for all entries.
prices = np.nan
rolled.ix[field] = prices
elif field == 'open_price':
# Use the first open, or NaN if we have no minutes.
try:
opens = buffer_minutes.loc[field].bfill().iloc[0]
except IndexError:
# Scalar assignment sets the value for all entries.
opens = np.nan
rolled.ix['open_price'] = opens
elif field == 'volume':
# Volume is the sum of the volumes during the
# course of the period.
volumes = buffer_minutes.ix['volume'].sum().fillna(0)
rolled.ix['volume'] = volumes
elif field == 'high':
# Use the highest high.
highs = buffer_minutes.ix['high'].max()
rolled.ix['high'] = highs
elif field == 'low':
# Use the lowest low.
lows = buffer_minutes.ix['low'].min()
rolled.ix['low'] = lows
for sid, value in rolled.ix[field].iterkv():
if not np.isnan(value):
try:
prior_values = \
self.last_known_prior_values[field][sid]
except KeyError:
prior_values = {}
self.last_known_prior_values[field][sid] = \
prior_values
prior_values['dt'] = digest_dt
prior_values['value'] = value
digest_panel.add_frame(digest_dt, rolled)
def get_history(self, history_spec, algo_dt):
"""
Main API used by the algoscript is mapped to this function.
Selects from the overarching history panel the values for the
@history_spec at the given @algo_dt.
"""
field = history_spec.field
bar_count = history_spec.bar_count
do_ffill = history_spec.ffill
index = pd.to_datetime(index_at_dt(history_spec, algo_dt))
return_frame = self.return_frames[history_spec.key_str]
# Overwrite the index.
# Not worrying about values here since the values are overwritten
# in the next step.
return_frame.index = index
if bar_count > 1:
# Get the last bar_count - 1 frames from our stored historical
# frames.
digest_panel = self.digest_panels[history_spec.frequency]\
.get_current()
digest_frame = digest_panel[field].copy().ix[1 - bar_count:]
else:
digest_frame = None
# Get minutes from our buffer panel to build the last row.
buffer_frame = self.buffer_panel_minutes(
earliest_minute=self.cur_window_starts[history_spec.frequency],
)[field]
if do_ffill:
digest_frame = ffill_digest_frame_from_prior_values(
field,
digest_frame,
self.last_known_prior_values,
)
buffer_frame = ffill_buffer_from_prior_values(
field,
buffer_frame,
digest_frame,
self.last_known_prior_values,
)
if digest_frame is not None:
return_frame.ix[:-1] = digest_frame.ix[:]
if field == 'volume':
return_frame.ix[algo_dt] = buffer_frame.fillna(0).sum()
elif field == 'high':
return_frame.ix[algo_dt] = buffer_frame.max()
elif field == 'low':
return_frame.ix[algo_dt] = buffer_frame.min()
elif field == 'open_price':
return_frame.ix[algo_dt] = buffer_frame.iloc[0]
else:
return_frame.ix[algo_dt] = buffer_frame.loc[algo_dt]
# Returning a copy of the DataFrame so that we don't crash if the user
# adds columns to the frame. Ideally we would just drop any added
# columns, but pandas 0.12.0 doesn't support in-place dropping of
# columns. We should re-evaluate this implementation once we're on a
# more up-to-date pandas.
return return_frame.copy()
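# Minimal illustrative sketch (not part of the original module): the per-field
# aggregation rules applied in HistoryContainer.roll, shown on a plain DataFrame
# of fabricated minute bars so the digest logic is easy to see in isolation.
def _example_minute_digest():
    minutes = pd.DataFrame({
        'open_price': [10.0, 10.5, 10.2],
        'close_price': [10.4, 10.1, 10.3],
        'high': [10.6, 10.7, 10.4],
        'low': [9.9, 10.0, 10.1],
        'volume': [100, 150, 120],
    })
    return pd.Series({
        'open_price': minutes['open_price'].iloc[0],     # first open
        'close_price': minutes['close_price'].iloc[-1],  # last close
        'high': minutes['high'].max(),                   # highest high
        'low': minutes['low'].min(),                     # lowest low
        'volume': minutes['volume'].sum(),               # summed volume
    })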
| apache-2.0 |
IndraVikas/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
moonbury/notebooks | github/MatplotlibCookbook/Chapter 8/wx-supershape-1.py | 3 | 1121 | import wx, numpy
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
def supershape_radius(phi, a, b, m, n1, n2, n3):
theta = .25 * m * phi
cos = numpy.fabs(numpy.cos(theta) / a) ** n2
sin = numpy.fabs(numpy.sin(theta) / b) ** n3
r = (cos + sin) ** (-1. / n1)
r /= numpy.max(r)
return r
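# Added note (not in the original script): the function above is the Gielis
# "superformula",
#     r(phi) = ( |cos(m*phi/4) / a|**n2 + |sin(m*phi/4) / b|**n3 ) ** (-1.0/n1),
# normalized so that max(r) == 1 before it is plotted in polar coordinates.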
class SuperShapeFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title,
style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER,
size = (480, 480))
self.fig = Figure((6, 6), dpi = 80)
self.panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(FigureCanvasWxAgg(self.panel, -1, self.fig), 1)
self.panel.SetSizer(sizer)
self.draw_figure()
def draw_figure(self):
phi = numpy.linspace(0, 2 * numpy.pi, 1024)
r = supershape_radius(phi, 1, 1, 3, 2, 18, 18)
ax = self.fig.add_subplot(111, polar = True)
ax.plot(phi, r, lw = 3.)
self.fig.canvas.draw()
app = wx.App(redirect = True)
top = SuperShapeFrame(None, -1, 'SuperShape')
top.Show()
app.MainLoop()
| gpl-3.0 |
themrmax/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 11 | 25443 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
    # Make sure translating between 1D and N-D indices is preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=-1, verbose=2,
random_state=1, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # Once the computation is finished, an old gradient norm value is just
        # repeated, which we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
| bsd-3-clause |
mantidproject/mantid | qt/python/mantidqt/gui_helper.py | 3 | 5994 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy.QtWidgets import (QApplication) # noqa
from qtpy import QtCore, QtGui
import matplotlib
import sys
import os
try:
from mantid import __version__ as __mtd_version
from mantid import _bindir as __mtd_bin_dir
# convert to major.minor
__mtd_version = '.'.join(__mtd_version.split(".")[:2])
except ImportError: # mantid not found
__mtd_version = ''
__mtd_bin_dir=''
def set_matplotlib_backend():
    '''MUST be called before anything tries to use matplotlib
    This will set the backend if it hasn't been set already. It also returns
    the name of the backend, which should be used when importing the
    correct matplotlib widgets.'''
backend = matplotlib.get_backend()
if backend.startswith('module://'):
if backend.endswith('qt4agg'):
backend = 'Qt4Agg'
elif backend.endswith('workbench') or backend.endswith('qt5agg'):
backend = 'Qt5Agg'
else:
from qtpy import PYQT4, PYQT5 # noqa
if PYQT5:
backend = 'Qt5Agg'
elif PYQT4:
backend = 'Qt4Agg'
else:
raise RuntimeError('Do not know which matplotlib backend to set')
matplotlib.use(backend)
return backend
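# Illustrative usage sketch (not from the original module): call this before
# importing any matplotlib widgets, then import the canvas that matches the
# returned backend name. The import paths below are standard matplotlib ones.
#
#     backend = set_matplotlib_backend()
#     if backend == 'Qt5Agg':
#         from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
#     else:
#         from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg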
def get_qapplication():
''' Example usage:
app, within_mantid = get_qapplication()
reducer = eventFilterGUI.MainWindow() # the main ui class in this file
reducer.show()
if not within_mantid:
sys.exit(app.exec_())'''
app = QApplication.instance()
if app:
return app, app.applicationName().lower().startswith('mantid')
else:
return QApplication(sys.argv), False
def __to_external_url(interface_name: str, section: str, external_url: str) -> QtCore.QUrl:
if not external_url:
template = 'http://docs.mantidproject.org/nightly/interfaces/{}/{}.html'
external_url = template.format(section, interface_name)
return QtCore.QUrl(external_url)
def __to_qthelp_url(interface_name: str, section: str, qt_url: str) -> str:
if qt_url:
return qt_url
else:
template = 'qthelp://org.sphinx.mantidproject.{}/doc/interfaces/{}/{}.html'
return template.format(__mtd_version, section, interface_name)
def __get_collection_file(collection_file: str) -> str:
if not collection_file:
if not __mtd_bin_dir:
return 'HELP COLLECTION FILE NOT FOUND'
else:
collection_file = os.path.join(__mtd_bin_dir, '../docs/qthelp/MantidProject.qhc')
return os.path.abspath(collection_file)
def show_interface_help(mantidplot_name, assistant_process, area: str='',
collection_file: str='',
qt_url: str='', external_url: str=""):
''' Shows the help page for a custom interface
@param mantidplot_name: used by showCustomInterfaceHelp
@param assistant_process: needs to be started/closed from outside (see example below)
    @param collection_file: qth file containing the help in the format used by qtassistant. The default is
``mantid._bindir + '../docs/qthelp/MantidProject.qhc'``
@param qt_url: location of the help in the qth file. The default value is
``qthelp://org.sphinx.mantidproject.{mtdversion}/doc/interfaces/{mantidplot_name}.html``.
@param external_url: location of external page to be displayed in the default browser. The default value is
``http://docs.mantidproject.org/nightly/interfaces/framework/{mantidplot_name}.html``
Example using defaults:
#in the __init__ function of the GUI add:
self.assistant_process = QtCore.QProcess(self)
self.mantidplot_name='DGS Planner'
#add a help function in the GUI
def help(self):
show_interface_help(self.mantidplot_name,
self.assistant_process)
#make sure you close the qtassistant when the GUI is closed
def closeEvent(self, event):
self.assistant_process.close()
self.assistant_process.waitForFinished()
event.accept()
'''
try:
# try using built-in help in mantid
import mantidqt
mantidqt.interfacemanager.InterfaceManager().showCustomInterfaceHelp(mantidplot_name, area)
except: #(ImportError, ModuleNotFoundError) raises the wrong type of error
# built-in help failed, try external qtassistant then give up and launch a browser
# cleanup previous version
assistant_process.close()
assistant_process.waitForFinished()
# where to expect qtassistant
helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
helpapp += 'assistant'
collection_file = __get_collection_file(collection_file)
if os.path.isfile(helpapp) and os.path.isfile(collection_file):
# try to find the collection file and launch qtassistant
args = ['-enableRemoteControl',
'-collectionFile', collection_file,
'-showUrl', __to_qthelp_url(mantidplot_name, area, qt_url)]
assistant_process.close()
assistant_process.waitForFinished()
assistant_process.start(helpapp, args)
else:
            # give up and open a URL in the default browser
openUrl=QtGui.QDesktopServices.openUrl
sysenv=QtCore.QProcessEnvironment.systemEnvironment()
ldp=sysenv.value('LD_PRELOAD')
if ldp:
del os.environ['LD_PRELOAD']
# create a url to the help in the default location
openUrl(__to_external_url(mantidplot_name, area, external_url))
if ldp:
os.environ['LD_PRELOAD']=ldp
| gpl-3.0 |
brodeau/aerobulk | python/plot_tests/plot_station_asf.py | 1 | 9926 | #!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Post-diagnostic of STATION_ASF / L. Brodeau, 2019
import sys
from os import path as path
#from string import replace
import math
import numpy as nmp
from netCDF4 import Dataset,num2date
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
reload(sys)
sys.setdefaultencoding('utf8')
cy1 = '2016' ; # First year
cy2 = '2018' ; # Last year
jt0 = 0
jt0 = 17519
dir_figs='.'
size_fig=(13,7)
fig_ext='png'
clr_red = '#AD0000'
clr_blu = '#3749A3'
clr_gre = '#548F64'
clr_sat = '#ffed00'
clr_mod = '#008ab8'
rDPI=200.
L_ALGOS = [ 'COARE3p6' , 'ECMWF' , 'NCAR' ]
l_xtrns = [ '-noskin' , '-noskin' , '' ] ; # string to add to algo name (L_ALGOS) to get version without skin params turned on
l_color = [ '#ffed00' , '#008ab8' , '0.4' ] ; # colors to differentiate algos on the plot
l_width = [ 3 , 2 , 1 ] ; # line-width to differentiate algos on the plot
l_style = [ '-' , '-' , '--' ] ; # line-style
L_VNEM = [ 'qla' , 'qsb' , 'qt' , 'qlw' , 'taum' , 'dt_skin' ]
L_VARO = [ 'Qlat' , 'Qsen' , 'Qnet' , 'Qlw' , 'Tau' , 'dT_skin' ] ; # name of variable on figure
L_VARL = [ r'$Q_{lat}$', r'$Q_{sens}$' , r'$Q_{net}$' , r'$Q_{lw}$' , r'$|\tau|$' , r'$\Delta T_{skin}$' ] ; # name of variable in latex mode
L_VUNT = [ r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$N/m^2$' , 'K' ]
L_VMAX = [ 75. , 75. , 800. , 25. , 1.2 , -0.7 ]
L_VMIN = [ -250. , -125. , -400. , -150. , 0. , 0.7 ]
L_ANOM = [ True , True , True , True , True , False ]
#L_VNEM = [ 'qlw' ]
#L_VARO = [ 'Qlw' ] ; # name of variable on figure
#L_VARL = [ r'$Q_{lw}$' ] ; # name of variable in latex mode
#L_VUNT = [ r'$W/m^2$' ]
#L_VMAX = [ 25. ]
#L_VMIN = [ -150. ]
#L_ANOM = [ True ]
nb_algos = len(L_ALGOS) ; print(nb_algos)
# Getting arguments:
narg = len(sys.argv)
if narg != 2:
print 'Usage: '+sys.argv[0]+' <DIR_OUT_SASF>'; sys.exit(0)
cdir_data = sys.argv[1]
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Populating and checking existence of files to be read
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def chck4f(cf):
cmesg = 'ERROR: File '+cf+' does not exist !!!'
if not path.exists(cf): print cmesg ; sys.exit(0)
###cf_in = nmp.empty((), dtype="S10")
cf_in = [] ; cf_in_ns = []
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in.append(cfi)
# Same but without skin params:
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+l_xtrns[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in_ns.append(cfi)
print('Files we are going to use:')
for ja in range(nb_algos): print(cf_in[ja])
print(' --- same without cool-skin/warm-layer:')
for ja in range(nb_algos): print(cf_in_ns[ja])
#-----------------------------------------------------------------
# Getting time array from the first file:
id_in = Dataset(cf_in[0])
vt = id_in.variables['time_counter'][jt0:]
cunit_t = id_in.variables['time_counter'].units
clndr_t = id_in.variables['time_counter'].calendar
id_in.close()
Nt = len(vt)
print(' "time" => units = '+cunit_t+', calendar = "'+clndr_t+'"')
vtime = num2date(vt, units=cunit_t) ; # something understandable!
ii=Nt/300
ib=max(ii-ii%10,1)
xticks_d=int(30*ib)
font_inf = { 'fontname':'Open Sans', 'fontweight':'normal', 'fontsize':14 }
nb_var = len(L_VNEM)
xF = nmp.zeros((Nt,nb_algos))
xFa = nmp.zeros((Nt,nb_algos))
for ctest in ['skin','noskin']:
for jv in range(nb_var):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos):
#
if ctest == 'skin': id_in = Dataset(cf_in[ja])
if ctest == 'noskin': id_in = Dataset(cf_in_ns[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xF[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(L_VMIN[jv], L_VMAX[jv]) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
if L_ANOM[jv]:
for ja in range(nb_algos): xFa[:,ja] = xF[:,ja] - nmp.mean(xF,axis=1)
if nmp.sum(xFa[:,:]) == 0.0:
                print(' Well! Seems that for variable '+L_VARO[jv]+', choice of algo has no impact at all!')
print(' ==> skipping anomaly plot...')
else:
                # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
#print 'yrng = ', yrng ; #sys.exit(0)
fig = plt.figure(num = 10+jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate('Anomaly of '+cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'_anomaly.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(10+jv)
# Difference skin vs noskin:
xFns = nmp.zeros((Nt,nb_algos))
for jv in range(nb_var-1):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos-1):
id_in = Dataset(cf_in[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
#
id_in = Dataset(cf_in_ns[ja])
xFns[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
xFa[:,ja] = xF[:,ja] - xFns[:,ja] ; # difference!
        # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
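        # Worked example (hypothetical numbers): rmax = 0.37 gives romagn = -1,
        # rmlt = 10**(-1)/2 = 0.05 and yrng = ceil(0.37/0.05)*0.05 = 0.4,
        # i.e. a "round" symmetric bound slightly above the largest difference.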
        print('yrng = ', yrng) ; #sys.exit(0)
for ja in range(nb_algos-1):
calgo = L_ALGOS[ja]
if nmp.sum(xFa[:,ja]) == 0.0:
print(' Well! Seems that for variable '+L_VARO[jv]+', and algo '+calgo+', skin param has no impact')
print(' ==> skipping difference plot...')
else:
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=None, zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
#plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig('diff_skin-noskin_'+L_VARO[jv]+'_'+calgo+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
| gpl-3.0 |
aflaxman/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
    rpca = TruncatedSVD(n_components=2, algorithm='randomized',
                        random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
moreati/pandashells | pandashells/lib/arg_lib.py | 7 | 6681 | from pandashells.lib import config_lib
def _check_for_recognized_args(*args):
"""
Raise an error if unrecognized argset is specified
"""
allowed_arg_set = set([
'io_in',
'io_out',
'example',
'xy_plotting',
'decorating',
])
in_arg_set = set(args)
unrecognized_set = in_arg_set - allowed_arg_set
if unrecognized_set:
msg = '{} not in allowed set {}'.format(unrecognized_set,
allowed_arg_set)
raise ValueError(msg)
def _io_in_adder(parser, config_dict, *args):
"""
Add input options to the parser
"""
in_arg_set = set(args)
if 'io_in' in in_arg_set:
group = parser.add_argument_group('Input Options')
# define the valid components
io_opt_list = ['csv', 'table', 'header', 'noheader']
# allow the option of supplying input column names
msg = 'Overwrite input column names with this list'
group.add_argument(
'--names', nargs='+', type=str, dest='names',
metavar="name", help=msg)
default_for_input = [
config_dict['io_input_type'],
config_dict['io_input_header']
]
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-i', '--input_options', nargs='+', type=str, dest='input_options',
metavar='option', default=default_for_input, choices=io_opt_list,
help=msg)
def _io_out_adder(parser, config_dict, *args):
"""
Add output options to the parser
"""
in_arg_set = set(args)
if 'io_out' in in_arg_set:
group = parser.add_argument_group('Output Options')
# define the valid components
io_opt_list = [
'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',
]
# define the current defaults
default_for_output = [
config_dict['io_output_type'],
config_dict['io_output_header'],
config_dict['io_output_index']
]
# show the current defaults in the arg parser
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-o', '--output_options', nargs='+',
type=str, dest='output_options', metavar='option',
default=default_for_output, help=msg)
msg = (
'Replace NaNs with this string. '
'A string containing \'nan\' will set na_rep to numpy NaN. '
'Current default is {}'
).format(repr(str(config_dict['io_output_na_rep'])))
group.add_argument(
'--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',
help=msg)
def _decorating_adder(parser, *args):
in_arg_set = set(args)
if 'decorating' in in_arg_set:
# get a list of valid plot styling info
context_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_context'][0][1]
theme_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_theme'][0][1]
palette_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_palette'][0][1]
group = parser.add_argument_group('Plot specific Options')
msg = "Set the x-limits for the plot"
group.add_argument(
'--xlim', nargs=2, type=float, dest='xlim',
metavar=('XMIN', 'XMAX'), help=msg)
msg = "Set the y-limits for the plot"
group.add_argument(
'--ylim', nargs=2, type=float, dest='ylim',
metavar=('YMIN', 'YMAX'), help=msg)
msg = "Draw x axis with log scale"
group.add_argument(
'--xlog', action='store_true', dest='xlog', default=False,
help=msg)
msg = "Draw y axis with log scale"
group.add_argument(
'--ylog', action='store_true', dest='ylog', default=False,
help=msg)
msg = "Set the x-label for the plot"
group.add_argument(
'--xlabel', nargs=1, type=str, dest='xlabel', help=msg)
msg = "Set the y-label for the plot"
group.add_argument(
'--ylabel', nargs=1, type=str, dest='ylabel', help=msg)
msg = "Set the title for the plot"
group.add_argument(
'--title', nargs=1, type=str, dest='title', help=msg)
msg = "Specify legend location"
group.add_argument(
'--legend', nargs=1, type=str, dest='legend',
choices=['1', '2', '3', '4', 'best'], help=msg)
msg = "Specify whether hide the grid or not"
group.add_argument(
'--nogrid', action='store_true', dest='no_grid', default=False,
help=msg)
msg = "Specify plot context. Default = '{}' ".format(context_list[0])
group.add_argument(
'--context', nargs=1, type=str, dest='plot_context',
default=[context_list[0]], choices=context_list, help=msg)
msg = "Specify plot theme. Default = '{}' ".format(theme_list[0])
group.add_argument(
'--theme', nargs=1, type=str, dest='plot_theme',
default=[theme_list[0]], choices=theme_list, help=msg)
msg = "Specify plot palette. Default = '{}' ".format(palette_list[0])
group.add_argument(
'--palette', nargs=1, type=str, dest='plot_palette',
default=[palette_list[0]], choices=palette_list, help=msg)
msg = "Save the figure to this file"
group.add_argument('--savefig', nargs=1, type=str, help=msg)
def _xy_adder(parser, *args):
in_arg_set = set(args)
if 'xy_plotting' in in_arg_set:
msg = 'Column to plot on x-axis'
parser.add_argument(
'-x', nargs=1, type=str, dest='x', metavar='col', help=msg)
msg = 'List of columns to plot on y-axis'
parser.add_argument(
'-y', nargs='+', type=str, dest='y', metavar='col', help=msg)
msg = "Plot style(s) defaults to .-"
parser.add_argument(
'-s', '--style', nargs='+', type=str, dest='style', default=['.-'],
help=msg, metavar='style')
def add_args(parser, *args):
"""Adds argument blocks to the arg parser
:type parser: argparse instance
    :param parser: The argparse instance to use in adding arguments
    Additional arguments are the names of argument blocks to add
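    Example (illustrative sketch; assumes a readable pandashells config):
        >>> import argparse
        >>> parser = argparse.ArgumentParser()
        >>> add_args(parser, 'io_in', 'io_out', 'xy_plotting', 'decorating')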
"""
config_dict = config_lib.get_config()
_check_for_recognized_args(*args)
_io_in_adder(parser, config_dict, *args)
_io_out_adder(parser, config_dict, *args)
_decorating_adder(parser, *args)
_xy_adder(parser, *args)
| bsd-2-clause |
rbiswas4/SNsims | snsims_previous/snsims/tmp/models.py | 1 | 2804 | #!/usr/bin/env python
import sncosmo.models
import numpy
class SEDFileSource(sncosmo.models.TimeSeriesSource):
"""A TimeSeriesSource stored in a 3-column ASCII file format, for PHASE,
LAMBDA, and F_LAMBDA. The hash symbol # is a comment line.
The spectral flux density of this model is given by
.. math::
F(t, \lambda) = A \\times M(t, \lambda)
where _M_ is the flux defined on a grid in phase and wavelength and _A_
(amplitude) is the single free parameter of the model. It should be noted
that while t and \lambda are in the rest frame of the object, the flux
density is defined at redshift zero. This means that for objects with the
same intrinsic luminosity, the amplitude will be smaller for objects at
larger luminosity distances.
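    For illustration (hypothetical values), the expected file layout is three
    whitespace-separated columns covering a complete phase/wavelength grid::
        # phase  wavelength  flux
        0.0      4000.0      1.2e-18
        0.0      4010.0      1.3e-18
        1.0      4000.0      1.1e-18
        1.0      4010.0      1.2e-18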
Parameters
----------
filename : str
Name of the filename that contains the Time Series
zero_before : bool, optional
If True, flux at phases before minimum phase will be zeroed. The
default is False, in which case the flux at such phases will be equal
to the flux at the minimum phase (``flux[0, :]`` in the input array).
version : str, optional
Version of the model. Default is `None`.
Returns
-------
`~sncosmo.TimeSeriesSource` instance representing the TimeSeriesSource
in file
"""
_param_names = ['amplitude']
param_names_latex = ['A']
def __init__(self, filename, zero_before=False, version=None):
phase, wave, flux = numpy.loadtxt(filename, unpack=True)
# Convert 3 column format to that expected by TimeSeriesSource
phase_u = numpy.unique(phase)
wave_u = numpy.unique(wave)
lenp = len(phase_u)
lenw = len(wave_u)
if lenp * lenw != len(flux):
raise TypeError('File is not a TimeSeriesSource')
i = numpy.zeros(len(flux), dtype='int')
j = numpy.zeros(len(flux), dtype='int')
for index, p in enumerate(phase_u):
i[phase == p] = index
for index, w in enumerate(wave_u):
j[wave == w] = index
flux = flux[i * lenw + j]
flux = numpy.reshape(flux, (lenp, lenw))
super(SEDFileSource, self).__init__(phase_u, wave_u, flux,
zero_before=False,
name=filename, version=None)
if __name__ == '__main__':
# filename = '/Users/akim/project/SNDATA_ROOT/snsed/NON1A/SDSS-019323.SED'
# data = SEDFileSource(filename)
sn = sncosmo.Model(source='snana-2007nc')
    print(sn.param_names)
    import matplotlib.pyplot as plt
    # Plotting `data` requires uncommenting the SEDFileSource lines above:
    # plt.plot(data._wave, data.flux(0, data._wave))
plt.plot(sn.source._wave, sn.flux(0, sn.source._wave) * 0.95)
plt.show()
| mit |
Ttl/scikit-rf | skrf/io/general.py | 3 | 22567 |
'''
.. module:: skrf.io.general
========================================
general (:mod:`skrf.io.general`)
========================================
General io functions for reading and writing skrf objects
.. autosummary::
:toctree: generated/
read
read_all
read_all_networks
write
write_all
save_sesh
Writing output to spreadsheet
.. autosummary::
:toctree: generated/
network_2_spreadsheet
networkset_2_spreadsheet
'''
import sys
import six.moves.cPickle as pickle
from six.moves.cPickle import UnpicklingError
import inspect
import os
import zipfile
import warnings
import numpy as npy  # used by read_csv below
from ..util import get_extn, get_fid
from ..network import Network
from ..frequency import Frequency
from ..media import Media
from ..networkSet import NetworkSet
from ..calibration.calibration import Calibration
from copy import copy
dir_ = copy(dir)
# delayed import: from pandas import DataFrame, Series for ntwk_2_spreadsheet
# file extension conventions for skrf objects.
global OBJ_EXTN
OBJ_EXTN = [
[Frequency, 'freq'],
[Network, 'ntwk'],
[NetworkSet, 'ns'],
[Calibration, 'cal'],
[Media, 'med'],
[object, 'p'],
]
def read(file, *args, **kwargs):
'''
Read skrf object[s] from a pickle file
Reads a skrf object that is written with :func:`write`, which uses
the :mod:`pickle` module.
Parameters
------------
file : str or file-object
name of file, or a file-object
\*args, \*\*kwargs : arguments and keyword arguments
passed through to pickle.load
Examples
-------------
>>> n = rf.Network(f=[1,2,3],s=[1,1,1],z0=50)
>>> n.write('my_ntwk.ntwk')
>>> n_2 = rf.read('my_ntwk.ntwk')
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Notes
-------
if `file` is a file-object it is left open, if it is a filename then
a file-object is opened and closed. If file is a file-object
and reading fails, then the position is reset back to 0 using seek
if possible.
'''
fid = get_fid(file, mode='rb')
try:
obj = pickle.load(fid, *args, **kwargs)
except (UnpicklingError, UnicodeDecodeError) as e:
# if fid is seekable then reset to beginning of file
fid.seek(0)
if isinstance(file, str):
# we created the fid so close it
fid.close()
raise
if isinstance(file, str):
# we created the fid so close it
fid.close()
return obj
def write(file, obj, overwrite = True):
'''
Write skrf object[s] to a file
This uses the :mod:`pickle` module to write skrf objects to a file.
Note that you can write any pickl-able python object. For example,
you can write a list or dictionary of :class:`~skrf.network.Network`
objects
or :class:`~skrf.calibration.calibration.Calibration` objects. This
will write out a single file. If you would like to write out a
    separate file for each object, use :func:`write_all`.
Parameters
------------
file : file or string
File or filename to which the data is saved. If file is a
file-object, then the filename is unchanged. If file is a
string, an appropriate extension will be appended to the file
name if it does not already have an extension.
obj : an object, or list/dict of objects
object or list/dict of objects to write to disk
overwrite : Boolean
if file exists, should it be overwritten?
Notes
-------
    If `file` is a str, but doesn't contain a suffix, one is chosen
automatically. Here are the extensions
==================================================== ===============
skrf object extension
==================================================== ===============
:class:`~skrf.frequency.Frequency` '.freq'
:class:`~skrf.network.Network` '.ntwk'
:class:`~skrf.networkSet.NetworkSet` '.ns'
:class:`~skrf.calibration.calibration.Calibration` '.cal'
:class:`~skrf.media.media.Media` '.med'
other '.p'
==================================================== ===============
To make the file written by this method cross-platform, the pickling
protocol 2 is used. See :mod:`pickle` for more info.
Examples
-------------
Convert a touchstone file to a pickled Network,
>>> n = rf.Network('my_ntwk.s2p')
>>> rf.write('my_ntwk',n)
>>> n_red = rf.read('my_ntwk.ntwk')
Writing a list of different objects
>>> n = rf.Network('my_ntwk.s2p')
>>> ns = rf.NetworkSet([n,n,n])
>>> rf.write('out',[n,ns])
>>> n_red = rf.read('out.p')
See Also
------------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
skrf.network.Network.write : write method of Network
skrf.calibration.calibration.Calibration.write : write method of Calibration
'''
if isinstance(file, str):
extn = get_extn(file)
if extn is None:
# if there is not extension add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
file = file + '.' + extn
if os.path.exists(file):
if not overwrite:
warnings.warn('file exists, and overwrite option is False. Not writing.')
return
with open(file, 'wb') as fid:
pickle.dump(obj, fid, protocol=2)
else:
fid = file
pickle.dump(obj, fid, protocol=2)
fid.close()
def read_all(dir='.', contains = None, f_unit = None, obj_type=None):
'''
Read all skrf objects in a directory
Attempts to load all files in `dir`, using :func:`read`. Any file
that is not readable by skrf is skipped. Optionally, simple filtering
can be achieved through the use of `contains` argument.
Parameters
--------------
dir : str, optional
the directory to load from, default \'.\'
contains : str, optional
if not None, only files containing this substring will be loaded
f_unit : ['hz','khz','mhz','ghz','thz']
for all :class:`~skrf.network.Network` objects, set their
frequencies's :attr:`~skrf.frequency.Frequency.f_unit`
obj_type : str
Name of skrf object types to read (ie 'Network')
Returns
---------
out : dictionary
dictionary containing all loaded skrf objects. keys are the
filenames without extensions, and the values are the objects
Examples
----------
>>> rf.read_all('skrf/data/')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
'one_port': one port Calibration: 'one_port', 500-750 GHz, 201 pts, 4-ideals/4-measured,
...
>>> rf.read_all('skrf/data/', obj_type = 'Network')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
...
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
'''
out={}
for filename in os.listdir(dir):
if contains is not None and contains not in filename:
continue
fullname = os.path.join(dir,filename)
keyname = os.path.splitext(filename)[0]
try:
out[keyname] = read(fullname)
continue
except:
pass
try:
out[keyname] = Network(fullname)
continue
except:
pass
if f_unit is not None:
for keyname in out:
try:
out[keyname].frequency.unit = f_unit
except:
pass
if obj_type is not None:
out = dict([(k, out[k]) for k in out if
isinstance(out[k],sys.modules[__name__].__dict__[obj_type])])
return out
def read_all_networks(*args, **kwargs):
'''
Read all networks in a directory.
This is a convenience function. It just calls::
read_all(*args,obj_type='Network', **kwargs)
See Also
----------
read_all
'''
if 'f_unit' not in kwargs:
kwargs.update({'f_unit':'ghz'})
return read_all(*args,obj_type='Network', **kwargs)
ran = read_all_networks
def write_all(dict_objs, dir='.', *args, **kwargs):
'''
Write a dictionary of skrf objects individual files in `dir`.
Each object is written to its own file. The filename used for each
object is taken from its key in the dictionary. If no extension
exists in the key, then one is added. See :func:`write` for a list
of extensions. If you would like to write the dictionary to a single
output file use :func:`write`.
Notes
-------
Any object in dict_objs that is pickl-able will be written.
Parameters
------------
dict_objs : dict
dictionary of skrf objects
dir : str
directory to save skrf objects into
\*args, \*\*kwargs :
passed through to :func:`~skrf.io.general.write`. `overwrite`
option may be of use.
See Also
-----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
----------
    Writing a dictionary of different skrf objects
    >>> from skrf.data import line, short
    >>> d = {'line': line, 'short': short}
>>> rf.write_all(d)
'''
    if not os.path.exists(dir):
raise OSError('No such directory: %s'%dir)
for k in dict_objs:
filename = k
obj = dict_objs[k]
extn = get_extn(filename)
if extn is None:
# if there is not extension add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
filename = filename + '.' + extn
try:
with open(os.path.join(dir+'/', filename), 'wb') as fid:
write(fid, obj,*args, **kwargs)
except Exception as inst:
print(inst)
            warnings.warn("couldn't write %s: %s"%(k,str(inst)))
pass
def save_sesh(dict_objs, file='skrfSesh.p', module='skrf', exclude_prefix='_'):
'''
Save all `skrf` objects in the local namespace.
This is used to save current workspace in a hurry, by passing it the
output of :func:`locals` (see Examples). Note this can be
used for other modules as well by passing a different `module` name.
Parameters
------------
dict_objs : dict
dictionary containing `skrf` objects. See the Example.
file : str or file-object, optional
the file to save all objects to
module : str, optional
the module name to grep for.
exclude_prefix: str, optional
dont save objects which have this as a prefix.
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
---------
Write out all skrf objects in current namespace.
    >>> rf.save_sesh(locals(), 'mysesh.p')
'''
objects = {}
print('pickling: ')
for k in dict_objs:
try:
if module in inspect.getmodule(dict_objs[k]).__name__:
try:
pickle.dumps(dict_objs[k])
if k[0] != '_':
objects[k] = dict_objs[k]
print(k+', ')
finally:
pass
except(AttributeError, TypeError):
pass
if len (objects ) == 0:
print('nothing')
write(file, objects)
def load_all_touchstones(dir = '.', contains=None, f_unit=None):
'''
    Loads all touchstone files in a given dir into a dictionary.
Notes
-------
Alternatively you can use the :func:`read_all` function.
Parameters
-----------
dir : string
the path
contains : string
a string the filenames must contain to be loaded.
f_unit : ['hz','mhz','ghz']
the frequency unit to assign all loaded networks. see
:attr:`frequency.Frequency.unit`.
Returns
---------
    ntwkDict : a dictionary with keys equal to the file name (without
a suffix), and values equal to the corresponding ntwk types
Examples
----------
>>> ntwk_dict = rf.load_all_touchstones('.', contains ='20v')
See Also
-----------
read_all
'''
ntwkDict = {}
for f in os.listdir (dir):
if contains is not None and contains not in f:
continue
fullname = os.path.join(dir,f)
keyname,extn = os.path.splitext(f)
extn = extn.lower()
try:
if extn[1]== 's' and extn[-1]=='p':
ntwkDict[keyname]=(Network(dir +'/'+f))
if f_unit is not None: ntwkDict[keyname].frequency.unit=f_unit
except:
pass
return ntwkDict
def write_dict_of_networks(ntwkDict, dir='.'):
'''
Saves a dictionary of networks touchstone files in a given directory
The filenames assigned to the touchstone files are taken from
the keys of the dictionary.
Parameters
-----------
ntwkDict : dictionary
dictionary of :class:`Network` objects
dir : string
directory to write touchstone file to
'''
warnings.warn('Deprecated. use write_all.', DeprecationWarning)
for ntwkKey in ntwkDict:
ntwkDict[ntwkKey].write_touchstone(filename = dir+'/'+ntwkKey)
def read_csv(filename):
'''
    Read 2-port s-parameter data from a csv file.
    Specifically, this reads a two-port csv file saved from a Rohde Schwarz
    ZVA-40, and possibly other network analyzers, and returns the data as a
    :class:`Network` object.
Parameters
------------
filename : str
name of file
Returns
--------
ntwk : :class:`Network` object
the network representing data in the csv file
'''
ntwk = Network(name=filename[:-4])
try:
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(9))
s11 = data[:,1] +1j*data[:,2]
s21 = data[:,3] +1j*data[:,4]
s12 = data[:,5] +1j*data[:,6]
s22 = data[:,7] +1j*data[:,8]
ntwk.s = npy.array([[s11, s21],[s12,s22]]).transpose().reshape(-1,2,2)
except(IndexError):
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(3))
ntwk.s = data[:,1] +1j*data[:,2]
ntwk.frequency.f = data[:,0]
ntwk.frequency.unit='ghz'
return ntwk
## file conversion
def statistical_2_touchstone(file_name, new_file_name=None,\
header_string='# GHz S RI R 50.0'):
'''
Converts Statistical file to a touchstone file.
Converts the file format used by Statistical and other Dylan Williams
software to standard touchstone format.
Parameters
------------
file_name : string
name of file to convert
new_file_name : string
name of new file to write out (including extension)
header_string : string
        touchstone header written to the beginning of the file
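    Example
    --------
    Illustrative only (hypothetical file name); the file is converted in place:
    >>> statistical_2_touchstone('meas.s2p')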
'''
    remove_tmp_file = False
    if new_file_name is None:
        new_file_name = 'tmp-'+file_name
        remove_tmp_file = True
# This breaks compatibility with python 2.6 and older
    with open(file_name, 'r') as old_file, open(new_file_name, 'w') as new_file:
new_file.write('%s\n'%header_string)
for line in old_file:
new_file.write(line)
if remove_tmp_file is True:
os.rename(new_file_name,file_name)
def network_2_spreadsheet(ntwk, file_name =None, file_type= 'excel', form='db',
*args, **kwargs):
'''
Write a Network object to a spreadsheet, for your boss
Write the s-parameters of a network to a spreadsheet, in a variety
    of forms. This function makes use of the pandas module, which in
turn makes use of the xlrd module. These are imported during this
function call. For more details about the file-writing functions
    see the pandas.DataFrame.to_??? functions.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwk : :class:`~skrf.network.Network` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
networkset_2_spreadsheet : writes a spreadsheet for many networks
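    Example
    ---------
    Illustrative only (uses the bundled `ring_slot` example network and the
    optional pandas dependency):
    >>> from skrf.data import ring_slot
    >>> network_2_spreadsheet(ring_slot, file_type='csv', form='db')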
'''
from pandas import DataFrame, Series # delayed because its not a requirement
file_extns = {'csv':'csv','excel':'xls','html':'html'}
form = form.lower()
if form not in ['db','ri','ma']:
raise ValueError('`form` must be either `db`,`ma`,`ri`')
file_type = file_type.lower()
if file_type not in file_extns.keys():
raise ValueError('file_type must be `csv`,`html`,`excel` ')
if ntwk.name is None and file_name is None:
raise ValueError('Either ntwk must have name or give a file_name')
if file_name is None and 'excel_writer' not in kwargs.keys():
file_name = ntwk.name + '.'+file_extns[file_type]
d = {}
index =ntwk.frequency.f_scaled
if form =='db':
for m,n in ntwk.port_tuples:
d['S%i%i Log Mag(dB)'%(m+1,n+1)] = \
Series(ntwk.s_db[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ma':
for m,n in ntwk.port_tuples:
d['S%i%i Mag(lin)'%(m+1,n+1)] = \
Series(ntwk.s_mag[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ri':
for m,n in ntwk.port_tuples:
d['S%i%i Real'%(m+1,n+1)] = \
Series(ntwk.s_re[:,m,n], index = index)
d[u'S%i%i Imag'%(m+1,n+1)] = \
Series(ntwk.s_im[:,m,n], index = index)
df = DataFrame(d)
df.__getattribute__('to_%s'%file_type)(file_name,
index_label='Freq(%s)'%ntwk.frequency.unit, *args, **kwargs)
def network_2_dataframe(ntwk, attrs=['s_db'], ports = None):
'''
Convert one or more attributes of a network to a pandas DataFrame
Parameters
--------------
ntwk : :class:`~skrf.network.Network` object
the network to write
attrs : list Network attributes
like ['s_db','s_deg']
ports : list of tuples
list of port pairs to write. defaults to ntwk.port_tuples
(like [[0,0]])
Returns
----------
df : pandas DataFrame Object
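    Example
    ---------
    Illustrative only (uses the bundled `ring_slot` example network):
    >>> from skrf.data import ring_slot
    >>> df = network_2_dataframe(ring_slot, attrs=['s_db', 's_deg'])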
'''
from pandas import DataFrame, Series # delayed because its not a requirement
d = {}
index =ntwk.frequency.f_scaled
if ports is None:
ports = ntwk.port_tuples
for attr in attrs:
for m,n in ports:
d['%s %i%i'%(attr, m+1,n+1)] = \
Series(ntwk.__getattribute__(attr)[:,m,n], index = index)
return DataFrame(d)
def networkset_2_spreadsheet(ntwkset, file_name=None, file_type= 'excel',
*args, **kwargs):
'''
Write a NetworkSet object to a spreadsheet, for your boss
Write the s-parameters of a each network in the networkset to a
spreadsheet. If the `excel` file_type is used, then each network,
is written to its own sheet, with the sheetname taken from the
network `name` attribute.
    This function makes use of the pandas module, which in turn makes
    use of the xlrd module. These are imported during this function call.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwkset : :class:`~skrf.networkSet.NetworkSet` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
    network_2_spreadsheet : writes a spreadsheet for a single network
'''
from pandas import DataFrame, Series, ExcelWriter # delayed because its not a requirement
if ntwkset.name is None and file_name is None:
raise(ValueError('Either ntwkset must have name or give a file_name'))
if file_type == 'excel':
writer = ExcelWriter(file_name)
[network_2_spreadsheet(k, writer, sheet_name =k.name, *args, **kwargs) for k in ntwkset]
writer.save()
else:
[network_2_spreadsheet(k,*args, **kwargs) for k in ntwkset]
# Provide a StringBuffer that lets me work with Python2 strings and Python3 unicode strings without thinking
if sys.version_info < (3, 0):
import StringIO
class StringBuffer(StringIO.StringIO):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
else:
import io
StringBuffer = io.StringIO
| bsd-3-clause |
faroit/loudness | python/tests/test_OME.py | 1 | 2084 | import numpy as np
import matplotlib.pyplot as plt
import loudness as ln
def plotResponse(freqPoints, dataPoints,
freqsInterp, responseInterp,
ylim=(-40, 10), title = ""):
if np.any(dataPoints):
plt.semilogx(freqPoints, dataPoints, 'o')
plt.semilogx(freqsInterp, responseInterp)
plt.xlim(20, 20e3)
plt.ylim(ylim)
plt.xlabel("Frequency, Hz")
plt.ylabel("Response, dB")
plt.title(title)
plt.show()
def plotMiddleEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(filterType, ln.OME.NONE)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getMiddleEarFreqPoints()
dataPoints = ome.getMiddleEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotOuterEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(ln.OME.NONE, filterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getOuterEarFreqPoints()
dataPoints = ome.getOuterEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotCombined(middleFilterType, outerFilterType, ylim=(-40, 10)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(middleFilterType, outerFilterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
plotResponse(None, None,
freqs, response, ylim)
plt.figure(1)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR, (-40, 0))
plt.figure(2)
plotMiddleEar(ln.OME.CHGM2011_MIDDLE_EAR, (-40, 10))
plt.figure(2)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR_HPF, (-40, 0))
plt.figure(3)
plotOuterEar(ln.OME.ANSIS342007_FREEFIELD, (-5, 20))
plt.figure(4)
plotOuterEar(ln.OME.ANSIS342007_DIFFUSEFIELD, (-5, 20))
plt.figure(5)
plotOuterEar(ln.OME.BD_DT990, (-10, 10))
plt.figure(6)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR,
ln.OME.ANSIS342007_FREEFIELD, (-40, 10))
plt.figure(7)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.BD_DT990, (-40, 10))
| gpl-3.0 |
INCF/BIDS2ISATab | setup.py | 1 | 2176 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="BIDS2ISATab",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/INCF/BIDS2ISATab',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='bids isatab',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["bids2isatab"],
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["future",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'bids2isatab=bids2isatab.main:main',
],
},
)
| apache-2.0 |
rizac/gfz-reportgen | gfzreport/sphinxbuild/map/__init__.py | 2 | 43603 | '''
This module implements the function `plotmap` which plots scattered points on a map
retrieved using the ArcGIS Server REST API. The function is highly customizable and is basically a
wrapper around the `Basemap` library (for the map background)
plus matplotlib utilities (for plotting points, shapes, labels and legend)
Created on Mar 10, 2016
@author: riccardo
'''
import numpy as np
import re
from itertools import izip, chain
from urllib2 import URLError, HTTPError
import socket
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.basemap import Basemap
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def parse_margins(obj, parsefunc=lambda margins: [float(val) for val in margins]):
"""Parses obj returning a 4 element numpy array denoting the top, right, bottom and left
values. This function first converts obj to a 4 element list L, and then
calls `parsefunc`, which by default converts all L values into float
:param obj: either None, a number, a list of numbers (allowed lengths: 1 to 4),
a comma/semicolon/spaces separated string (e.g. "4deg 0.0", "1, 1.2", "2km,4deg", "1 ; 2")
:param parsefunc: a function to be applied to obj converted to list. By default, returns
float(v) for any v in L
:return: a 4 element numpy array of floats denoting the top, right, bottom, left values of
the margins. The idea is the same as css margins, as depicted in the table below.
:Examples:
Called f `parsefunc`, then:
============= =========================
obj is returns
============= =========================
None [0, 0, 0, 0]
------------- -------------------------
string the list obtained after
splitting string via
regexp where comma,
semicolon and spaces
are valid separators
------------- -------------------------
x or [x] parsefunc([x, x, x, x])
------------- -------------------------
[x, y] parsefunc([x, y ,x, y])
------------- -------------------------
[x, y, z] parsefunc([x, y, z, y])
------------- -------------------------
[x, y, z, t] parsefunc([x, y, z, t])
============= =========================
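    Illustrative doctests (results shown as lists to avoid numpy repr
    differences between versions):
    >>> parse_margins(None).tolist()
    [0.0, 0.0, 0.0, 0.0]
    >>> parse_margins("1, 2").tolist()
    [1.0, 2.0, 1.0, 2.0]
    >>> parse_margins([1, 2, 3]).tolist()
    [1.0, 2.0, 3.0, 2.0]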
"""
if obj is None:
margins = [0] * 4
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
# is an iterable not string. Note the if above is py2 py3 compatible
margins = list(obj)
else:
try:
margins = [float(obj)] * 4
except (TypeError, ValueError):
margins = re.compile("(?:\\s*,\\s*|\\s*;\\s*|\\s+)").split(obj)
if len(margins) == 1:
margins *= 4
elif len(margins) == 2:
margins *= 2
elif len(margins) == 3:
margins.append(margins[1])
elif len(margins) != 4:
raise ValueError("unable to parse margins on invalid value '%s'" % obj)
return np.asarray(parsefunc(margins) if hasattr(parsefunc, "__call__") else margins)
# return margins
def parse_distance(dist, lat_0=None):
"""Returns the distance in degrees. If dist is in km or m, and lat_0 is not None,
returns w2lon, else h2lat. dist None defaults to 0
:param dist: float, int None, string. If string and has a unit, see above
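    Illustrative doctests:
    >>> parse_distance(None)
    0
    >>> parse_distance("2.5deg")
    2.5
    >>> round(parse_distance("111.195km"), 3)  # roughly one degree of latitude
    1.0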
"""
try:
return 0 if dist is None else float(dist)
except ValueError:
if dist[-3:].lower() == 'deg':
return float(dist[:-3])
elif dist[-2:] == 'km':
dst = 1000 * float(dist[:-2])
elif dist[-1:] == 'm':
            dst = float(dist[:-1])
else:
raise
return w2lon(dst, lat_0) if lat_0 is not None else h2lat(dst)
def get_lon0_lat0(min_lons, min_lats, max_lons, max_lats):
""" Calculates lat_0, lon_0, i.e., the mid point of the bounding box denoted by the
arguments
:param min_lons: the minimum of longitudes
:param min_lats: the maximum of latitudes
:param max_lons: the minimum of longitudes
:param max_lats: the maximum of latitudes
:return: the 2-element tuple denoting the mid point lon_0, lat_0
"""
lat_0 = max_lats / 2. + min_lats / 2.
lon_0 = max_lons / 2. + min_lons / 2.
if lon_0 > 180: # FIXME: necessary?? see self.get_normalized... above
lon_0 -= 360
return lon_0, lat_0
def getbounds(min_lon, min_lat, max_lon, max_lat, margins):
"""Calculates the bounds given the bounding box identified by the arguments and
given optional margins
:param min_lon: the minimum longitude (numeric, scalar)
:param min_lat: the maximum latitude (numeric, scalar)
:param max_lon: the minimum longitude (numeric, scalar)
:param max_lat: the maximum latitude (numeric, scalar)
:param margins: the margins as a css-like string (with units 'deg', 'km' or 'm'), or as
a 1 to 4 element array of numeric values (in that case denoting degrees).
As in css, a 4 element array denotes the [top, right, bottom, left] values.
None defaults to [0, 0, 0, 0].
:return: the 6-element tuple denoting lon_0, lat_0, min_lon, min_lat, max_lon, max_lat.
where min_lon, min_lat, max_lon, max_lat are the new bounds and lon_0 and lat_0 are
their midpoints (x and y, respectively)
"""
def parsefunc(mrgns):
"""parses mrgns as array of strings into array of floats
"""
return parse_distance(mrgns[0]), parse_distance(mrgns[1], max_lat), \
parse_distance(mrgns[2]), parse_distance(mrgns[3], min_lat)
top, right, btm, left = parse_margins(margins, parsefunc)
min_lon, min_lat, max_lon, max_lat = min_lon-left, min_lat-btm, max_lon+right, max_lat+top
if min_lon == max_lon:
min_lon -= 10 # in degrees
max_lon += 10 # in degrees
if min_lat == max_lat:
min_lat -= 10 # in degrees
max_lat += 10 # in degrees
# minima must be within bounds:
min_lat = max(-90, min_lat)
max_lat = min(90, max_lat)
min_lon = max(-180, min_lon)
max_lon = min(180, max_lon)
lon_0, lat_0 = get_lon0_lat0(min_lon, min_lat, max_lon, max_lat)
return lon_0, lat_0, min_lon, min_lat, max_lon, max_lat
# static constant converter (degree to meters and viceversa) for latitudes
DEG2M_LAT = 2 * np.pi * 6371 * 1000 / 360
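# (2*pi*6371000/360 is roughly 111195 m, i.e. ~111.2 km per degree of latitude)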
def lat2h(distance_in_degrees):
"""converts latitude distance from degrees to height in meters
:param distance_in_degrees: a distance (python scalar or numpy array) along the great circle
    expressed in degrees"""
deg2m_lat = DEG2M_LAT # 2 * np.pi * 6371 * 1000 / 360
return distance_in_degrees * deg2m_lat
def h2lat(distance_in_meters):
"""converts latitude distance from height in meters to degrees
    :param distance_in_meters: a distance (python scalar or numpy array) along the great circle
    expressed in meters"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
return distance_in_meters / deg2m_lat
def lon2w(distance_in_degrees, lat_0):
"""converts longitude distance from degrees to width in meters
:param distance_in_degrees: a distance (python scalar or numpy array)
along the lat_0 circle expressed in degrees
    :param lat_0: the latitude (in degrees) of the circle along
    which the lon2w(distance_in_degrees) must be converted to meters"""
deg2m_lat = DEG2M_LAT
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_degrees * deg2m_lon
def w2lon(distance_in_meters, lat_0):
"""converts longitude distance from width in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array)
along the lat_0 circle expressed in meters
    :param lat_0: the latitude (in degrees) of the
    circle along which w2lon(distance_in_meters) must be converted to degrees"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_meters / deg2m_lon
class MapHandler(object):
"""
Class handling bounds of a map given points (lons and lats)
"""
def __init__(self, lons, lats, map_margins):
"""Initializes a new MapHandler. If figure here is None, you **MUST**
call self.set_fig(fig) to calculate bounds and other stuff
when you have a ready figure"""
self.lons = lons if len(lons) else [0] # FIXME: use numpy arrays!!
self.lats = lats if len(lats) else [0]
self.max_lons, self.min_lons = max(self.lons), min(self.lons)
self.max_lats, self.min_lats = max(self.lats), min(self.lats)
self.lon_0, self.lat_0, self.llcrnrlon, self.llcrnrlat, self.urcrnrlon, self.urcrnrlat = \
getbounds(self.min_lons, self.min_lats, self.max_lons, self.max_lats, map_margins)
def _get_map_dims(self): # , fig_size_in_inches, colorbar=False):
"""Returns the map dimension width, height, in meters"""
max_lons, min_lons = self.urcrnrlon, self.llcrnrlon
max_lats, min_lats = self.urcrnrlat, self.llcrnrlat
height = lat2h(max_lats - min_lats)
width = lon2w(max_lons - min_lons, self.lat_0)
return width, height
def get_parallels(self, max_labels_count=8):
width, height = self._get_map_dims()
lat_0 = self.lat_0
N1 = int(np.ceil(height / max(width, height) * max_labels_count))
parallels = MapHandler._linspace(lat_0 - h2lat(height / 2),
lat_0 + h2lat(height / 2), N1)
return parallels
def get_meridians(self, max_labels_count=8):
width, height = self._get_map_dims()
lon_0 = self.lon_0
lat_0 = self.lat_0
N2 = int(np.ceil(width / max(width, height) * max_labels_count))
meridians = MapHandler._linspace(lon_0 - w2lon(width / 2, lat_0),
lon_0 + w2lon(width / 2, lat_0), N2)
meridians[meridians > 180] -= 360
return meridians
@staticmethod
def _linspace(val1, val2, N):
"""
returns around N 'nice' values between val1 and val2. Copied from obspy.plot_map
"""
dval = val2 - val1
round_pos = int(round(-np.log10(1. * dval / N)))
# Fake negative rounding as not supported by future as of now.
if round_pos < 0:
factor = 10 ** (abs(round_pos))
delta = round(2. * dval / N / factor) * factor / 2
else:
delta = round(2. * dval / N, round_pos) / 2
new_val1 = np.ceil(val1 / delta) * delta
new_val2 = np.floor(val2 / delta) * delta
N = (new_val2 - new_val1) / delta + 1
return np.linspace(new_val1, new_val2, N)
def _normalize(obj, size=None, dtype=None):
""""Casts" obj to a numpy array of the given optional size and optional dtype, and returns it.
If size is not None, the array must have length size. If not, and has length 1, it will be
resized to the specified size. Otherwise a ValueError is raised
If size is None, no resize will be in place and the array is returend as it is
Note: obj=None will be converted to the array [None], apparently in the current version of numpy
this wouldn't be the default (see argument ndmin=1)
:return an numpy array resulting to the coinversion of obj into array
:Examples:
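    Illustrative doctests (results shown as lists to avoid numpy repr
    differences between versions):
    >>> _normalize(None).tolist()
    []
    >>> _normalize(3.5, size=4).tolist()
    [3.5, 3.5, 3.5, 3.5]
    >>> _normalize([1, 2], size=4)
    Traceback (most recent call last):
        ...
    ValueError: invalid array length: 2. Expected 4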
"""
x = np.array(obj, ndmin=1) if dtype is None else np.array(obj, ndmin=1, dtype=dtype)
if size is None:
return np.array([]) if obj is None else x # if obj is None x is [None], return [] instead
try:
if len(x) == 1:
x = np.resize(x, size)
elif len(x) != size:
raise ValueError("invalid array length: %d. Expected %d" % (len(x), size))
except (ValueError, TypeError) as _err:
raise ValueError(str(_err))
return x
def torgba(html_str):
"""Converts html_str into a tuple of rgba colors all in [0, 1]
Curiously, matplotlib color functions do not provide this functionality for
'#RGBA' color formats
:param html_str: a valid html string in hexadecimal format.
Can have length 4, 7 or 9 such as #F1a, #fa98e3, #fc456a09
:return: a rgba vector, i.e. a 4-element numpy array of values in [0,1] denoting `html_str`
:raise: ValueError if html_str is invalid
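    Illustrative doctest (result shown as a list to avoid numpy repr
    differences between versions):
    >>> torgba('#ff0000').tolist()
    [1.0, 0.0, 0.0, 1.0]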
"""
if len(html_str) not in (4, 7, 9) or not html_str[0] == '#':
raise ValueError("'%s' invalid html string" % html_str)
elif len(html_str) == 4:
rgb = [html_str[i:i+1]*2 for i in xrange(1, len(html_str))]
else:
rgb = [html_str[i:i+2] for i in xrange(1, len(html_str), 2)]
if len(rgb) == 3:
rgb += ['FF']
return np.true_divide(np.array([int(r, 16) for r in rgb]), 255)
def _shapeargs(lons, lats, labels, sizes, colors, markers, legend_labels):
lons = _normalize(lons, dtype=float) # basically: convert to float array if scalar (size=0)
lats = _normalize(lats, dtype=float) # basically: convert to float array if scalar (size=0)
if len(lons) != len(lats):
raise ValueError('mismatch in lengths: lons (%d) and lats (%d)' % (len(lons), len(lats)))
leng = len(lons)
labels = _normalize(labels, size=leng)
colors = _normalize(colors, size=leng)
markers = _normalize(markers, size=leng)
legend_labels = _normalize(legend_labels, size=leng)
# colors[np.isnan(colors) | (colors <= 0)] = 1.0 # nan colors default to 1 (black?)
sizes = _normalize(sizes, size=leng, dtype=float)
valid_points = np.logical_not(np.isnan(lons) | np.isnan(lats) | (sizes <= 0))
# return all points whose corresponding numeric values are not nan:
return (lons[valid_points],
lats[valid_points],
labels[valid_points],
sizes[valid_points],
colors[valid_points],
markers[valid_points],
legend_labels[valid_points])
# def get_ax_size(ax, fig):
# bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# return bbox.width, bbox.height
def pix2inch(pix, fig):
"""Converts pixel to inches on a given matplotlib figure"""
return pix / fig.dpi
def inch2pix(inch, fig):
"""Converts inches to pixel on a given matplotlib figure"""
return inch * fig.dpi
def _joinargs(key_prefix, kwargs, **already_supplied_args):
'''updates already_supplied_args with kwargs using a given prefix in kwargs to identify
common keys. Used in plotmap for kwargs'''
key_prefix += "_"
len_prefix = len(key_prefix)
already_supplied_args.update({k[len_prefix:]: v
for k, v in kwargs.iteritems() if k.startswith(key_prefix)})
return already_supplied_args
def _mp_set_custom_props(drawfunc_retval, lines_props, labels_props):
"""Sets custom properties on drawparallels or drawmeridians return function.
drawfunc_retval is a dict of numbers mapped to tuples where the first element is a list of
matplotlib lines, and the second element is a list of matplotlib texts"""
_setprop(chain.from_iterable((lin for lin, lab in drawfunc_retval.itervalues())), lines_props)
_setprop(chain.from_iterable((lab for lin, lab in drawfunc_retval.itervalues())), labels_props)
def _setprop(iterator_of_mp_objects, props):
'''sets the given properties of an iterator of same type matplotlib objects'''
if not props:
return
prp = {}
for obj in iterator_of_mp_objects:
if not prp:
prp = {"set_%s" % name: val for name, val in props.iteritems()
if hasattr(obj, "set_%s" % name)}
for name, val in prp.iteritems():
getattr(obj, name)(val)
# values below CAN be None but CANNOT be arrays containing None's
def plotmap(lons,
lats,
labels=None,
legendlabels=None,
markers="o",
colors="#FF4400",
sizes=20,
cmap=None,
fontsize=None,
fontweight='regular',
fontcolor='k',
labels_h_offset=0,
labels_v_offset=0,
mapmargins='0.5deg',
figmargins=2,
arcgis_service='World_Street_Map',
arcgis_xpixels=1500,
arcgis_dpi=96,
urlfail='ignore',
maxmeridians=5,
maxparallels=5,
legend_pos='bottom',
legend_borderaxespad=1.5,
legend_ncol=1,
title=None,
show=False,
**kwargs): # @UnusedVariable
"""
Makes a scatter plot of points on a map background using ArcGIS REST API.
    :param lons: (array-like of length N or scalar) Longitudes of the data points, in degrees
:param lats: (array-like of length N or scalar) Latitudes of the data points, in degree
:param labels: (array-like of length N or string. Default: None, no labels) Annotations
(labels) for the individual data points on the map. If non-array (e.g. string), the same value
will be applied to all points
:param legendlabels: (array-like of length N or string. Default: None, no legend)
Annotations (labels) for the legend. You can supply a sparse array where only some points
will be displayed on the legend. All points with no legend label will not show up in the
legend
:param sizes: (array-like of length N or number. Default: 20) Sizes (in points^2) of the
individual points in the scatter plot.
:param markers: (array-like of length N,
`MarkerStyle<http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle>`_ or
string. Default: 'o' - circle) The markers (shapes) to be drawn for each point on the map.
See `markers <http://matplotlib.org/api/markers_api.html#module-matplotlib.markers>`_ for
more information on the different styles of markers scatter supports. Marker can be either
an instance of the class or the text shorthand for a particular marker.
:param colors: (array-like of length N,
`matplotlib color <http://matplotlib.org/api/colors_api.html>`_, e.g. string.
Default: "#FF4400")
Colors for the markers (fill color). You can type color transparency by supplying string of 9
elements where the last two characters denote the transparency ('00' fully transparent,
'ff' fully opaque). Note that this is a feature not implemented in `matplotlib` colors, where
transparency is given as the last element of the numeric tuple (r, g, b, a)
:param fontsize: (numeric or None. Default: None) The fontsize for all texts drawn on the
map (labels, axis tick labels, legend). None uses the default figure font size for all. Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontweight: (string or number. Default: 'regular') The font weight for all texts drawn
on the map (labels, axis tick labels, legend). Accepts the values (see
http://matplotlib.org/api/text_api.html#matplotlib.text.Text.set_weight):
```
[a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' |
'bold' | 'heavy' | 'extra bold' | 'black' ]
```
Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontcolor: (`matplotlib color <http://matplotlib.org/api/colors_api.html>`_ or
string. Default: 'k', black) The font color for all texts drawn on the
map (labels, axis tick labels, legend). Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param labels_h_offset: (string or number. Default: None = 0) The horizontal offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels westward, positive values eastward. Useful to avoid overlapping
markers and labels.
If numeric, it is assumed to be expressed in degrees. Otherwise, you can supply a string
with a number followed by one of the units 'm', 'km' or 'deg' (e.g., '5km', '0.5deg').
Note that this value affects the
`horizontalalignment` and `multialignment` properties of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_horizontalalignment` or `labels_ha` as optional argument will override
this behaviour (see `kwargs` below)
:param labels_v_offset: (string or number. Default: None = 0) The vertical offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels southward, positive values northward. See notes on `labels_h_offset` for details.
Note that this value affects the
`verticalalignment` property of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_verticalalignment` or `labels_va` as optional argument will override
this behaviour (see `kwargs` below)
:param mapmargins: (array-like of 1,2,3,4 elements, numeric or string, or None=0.
Default: '0.5deg').
The map margins, i.e. how much the map has to 'expand/shrink' in any direction, relative
to the bounding box calculated to include all points.
If array-like, it behaves like the css 'margin' property of html: 4 elements will denote
[top, right, bottom, left], two elements will denote [top/bottom, left/right], three
elements [top, right/left, bottom], a single element array (or a single number or a string)
applies the value to all directions.
Finally, elements of the array must be expressed as the arguments `labels_h_offset` or
`labels_v_offset`: numbers denoting degrees or strings with units 'm', 'km', 'deg'. Negative
values will shrink the map.
If string, the argument will first be split using commas, semicolons or spaces as delimiters
(if no delimiter is found, the string is taken as a single chunk) and converted to an array-like
object.
:param figmargins: (array-like of 1,2,3,4 elements, number or None=0. Default:2) The
figure margins *in font height units* (e.g., 2 means: twice the font height). This argument
behaves exactly as `mapmargins` but expands/shrinks the distances between map and figure
(image) bounds. Useful to include axis tick labels or legend, if they overflow.
Note also that strings
are allowed only if they are parsable to float (e.g. "5,6; -12 1")
:param arcgis_service: (string, default: 'World_Street_Map'). The map image type, or
more technically the service for the map
hosted on ArcGIS server. Other values are 'ESRI_Imagery_World_2D'
(default in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_),
'World_Topo_Map', 'World_Terrain_Base'. For details, see:
http://server.arcgisonline.com/arcgis/rest/services.
:param arcgis_xpixels: (numeric, default: 1500). Requested number of image pixels
in the x-direction (default is 400 in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_).
The documentation is quite unclear but this parameter seems to set the zoom of the image. From
this `link <http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage>`_:
a bigger number requests a bigger image, so the image will have more detail.
So when the zoom is bigger, `xsize` must be bigger to maintain the resolution
:param urlfail: (string, 'raise' or 'ignore'. Default: 'ignore'). Tells what to do if the
ArcGIS request fails (URLError, no internet connection, etc.). By default, on failure a plain
map with continent contours and oceans will be plotted (good for
debugging). Otherwise, the exception resulting from the web request is raised
:param maxmeridians: (numeric default: 5). The maximum number of meridians to be drawn. Set to
<=0 to hide meridians. Note that x-axis labels are also drawn.
To further manipulate meridians display, use any argument starting with
'mlabels_', 'mlines_' or 'meridians' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `meridians_linewidth=0` or 'mlines_linewidth=0'.
:param maxparallels: (numeric default: 5). The maximum number of parallels to be drawn. Set to
<=0 to hide parallels. Note that y-axis labels are also drawn.
To further manipulate parallels display, use any argument starting with
'plabels_', 'plines_' or 'parallels' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `parallels_linewidth=0` or 'plines_linewidth=0'.
:param legend_pos: (string in ['top', 'bottom', 'right', 'left'], default='bottom'). The legend
location with respect to the map. It also adjusts the bounding box that the legend will be
anchored to.
To
customize the legend placement entirely, overriding this parameter, provide `legend_loc`
(and optionally `legend_bbox_to_anchor`) in `kwargs` (see below)
:param legend_borderaxespad: (numeric, default 1.5) The pad between the axes and legend border,
in font units
:param legend_ncol: (integer, default=1) The legend number of columns
:param title: (string or None. Default: None) Title above the plot (Note: not tested)
:param show: (boolean. Default: False) Whether to show the figure after plotting or not
(Note: not tested). Can be used to do further customization of the plot before showing it.
:param fig: (matplotlib figure or None, default: None). Note: deprecated, pass None as
supplying an already existing figure with other axes might break the figure layout
:param kwargs: any kind of additional argument passed to `matplotlib` and `Basemap` functions
or objects.
The name of the argument must be of the form
```
prefix_propertyname=propertyvalue
```
where prefix indicates the function/object to be called with keyword argument:
```
propertyname=propertyvalue
```
Current supported prefixes are (for available property names see links):
Prefix Passes `propertyname` to
============ ==================================================================================
arcgis `Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_
used to retrieve the background map using the ArcGIS Server REST API. See also
http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage
basemap `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap>`_
the object responsible of drawing and managing the map. Note that
`basemap_resolution=h` and `basemap_epsg=4326` by default.
labels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the point labels on the map
legend The `legend <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_.
See the already implemented arguments `legend_borderaxespad`,
`legend_ncol`
legendlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the text labels of the legend
meridians `Basemap.drawmeridians`. For more detailed settings on meridians, see
`mlines` and `mlabels`
parallels `Basemap.drawparallels`. For more detailed settings on parallels, see
`plines` and `plabels`
plines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the parallels
plabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the parallels labels on the y axis
mlines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the meridians
mlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the meridians labels on the x axis
============ ==================================================================================
Examples
--------
- `legend_title='abc'` will call `legend(..., title='abc', ...)`
- `labels_path_effects=[PathEffects.withStroke(linewidth=2, foreground='white')]` will set
a white contour around each label text
- `meridians_labelstyle="+/-"` will call `Basemap.drawmeridians(..., labelstyle="+/-", ...)`
Notes:
------
The objects referenced by `plines`, `plabels`, `mlines`, `mlabels` and `legendlabels`
cannot be initialized directly with the given properties, which will be set after they are
created assuming that for any property `foo` passed as keyword argument in their constructor
there exists a method `set_foo(...)` (which will be called with the given propertyvalue).
This is most likely always true according to the matplotlib API, but we cannot guarantee it works
100% of the time
"""
lons, lats, labels, sizes, colors, markers, legendlabels =\
_shapeargs(lons, lats, labels, sizes, colors, markers, legendlabels)
# convert html strings to tuples of rgba values in [0, 1] if the former are in string format,
# because (maybe too old matplotlib version?) colors in the format '#RGBA' are not supported
# Also, if cmap is provided, basemap.scatter calls matplotlib.scatter which
# wants float sequences in case of a color map
if colors.dtype.char in ('U', 'S'): # pylint: disable=no-member
colors = np.array([torgba(c) for c in colors])
fig = plt.figure()
map_ax = fig.add_axes([0, 0, 1, 1]) # set axes size the same as figure
# setup handler for managing basemap coordinates and meridians / parallels calculation:
handler = MapHandler(lons, lats, mapmargins)
kwa = _joinargs('basemap', kwargs,
llcrnrlon=handler.llcrnrlon,
llcrnrlat=handler.llcrnrlat,
urcrnrlon=handler.urcrnrlon,
urcrnrlat=handler.urcrnrlat,
epsg='4326', # 4326, # 3395, # 3857,
resolution='i', # 'h',
ax=map_ax)
bmap = Basemap(**kwa)
try:
kwa = _joinargs("arcgis", kwargs, service=arcgis_service, xpixels=arcgis_xpixels,
dpi=arcgis_dpi)
# set the map image via a map service. In case you need the returned value, note that
# this function returns an AxesImage (check matplotlib doc)
bmap.arcgisimage(**kwa)
except (URLError, HTTPError, socket.error) as exc:
# failed, maybe there is not internet connection
if urlfail == 'ignore':
# Print a simple map offline
bmap.drawcoastlines()
watercolor = '#4444bb'
bmap.fillcontinents(color='#eebb66', lake_color=watercolor)
bmap.drawmapboundary(fill_color=watercolor)
else:
raise
# draw meridians and parallels. From basemap.drawmeridians / drawparallels doc:
# returns a dictionary whose keys are the meridian values, and
# whose values are tuples containing lists of the
# matplotlib.lines.Line2D and matplotlib.text.Text instances
# associated with each meridian. Deleting an item from the
# dictionary removes the corresponding meridian from the plot.
if maxparallels > 0:
kwa = _joinargs("parallels", kwargs, linewidth=1, fontsize=fontsize,
labels=[0, 1, 1, 0], fontweight=fontweight)
parallels = handler.get_parallels(maxparallels)
# Old basemap versions have problems with non-integer parallels.
try:
# Note: the method below returns a list of text objects
# representing the tick labels
_dict = bmap.drawparallels(parallels, **kwa)
except KeyError:
parallels = sorted(list(set(map(int, parallels))))
_dict = bmap.drawparallels(parallels, **kwa)
# set custom properties:
kwa_lines = _joinargs("plines", kwargs)
kwa_labels = _joinargs("plabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
if maxmeridians > 0:
kwa = _joinargs("meridians", kwargs, linewidth=1, fontsize=fontsize,
labels=[1, 0, 0, 1], fontweight=fontweight)
meridians = handler.get_meridians(maxmeridians)
_dict = bmap.drawmeridians(meridians, **kwa)
# set custom properties:
kwa_lines = _joinargs("mlines", kwargs)
kwa_labels = _joinargs("mlabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
# fig.get_axes()[0].tick_params(direction='out', length=15) # does not work, check basemap
fig.bmap = bmap
# compute the native bmap projection coordinates for events.
# from the docs (this is kind of outdated, however leave here for the moment):
# Calling a Basemap class instance with the arguments lon, lat will
# convert lon/lat (in degrees) to x/y map projection
# coordinates (in meters). If optional keyword ``inverse`` is
# True (default is False), the inverse transformation from x/y
# to lon/lat is performed.
# For cylindrical equidistant projection (``cyl``), this
# does nothing (i.e. x,y == lon,lat).
# For non-cylindrical projections, the inverse transformation
# always returns longitudes between -180 and 180 degrees. For
# cylindrical projections (self.projection == ``cyl``,
# ``cea``, ``mill``, ``gall`` or ``merc``)
# the inverse transformation will return longitudes between
# self.llcrnrlon and self.llcrnrlat.
# Input arguments lon, lat can be either scalar floats,
# sequences, or numpy arrays.
# parse hoffset and voffset and assure they are at least arrays of 1 elements
# (for aligning text labels, see below)
hoffset = np.array(parse_distance(labels_h_offset, lats), copy=False, ndmin=1)
voffset = np.array(parse_distance(labels_v_offset), copy=False, ndmin=1)
lbl_lons = lons + hoffset
lbl_lats = lats + voffset
# convert labels coordinates:
xlbl, ylbl = bmap(lbl_lons, lbl_lats)
# plot point labels
max_points = -1 # negative means: plot all
if max_points < 0 or len(lons) < max_points:
# Set alignments which control also the corner point reference when placing labels
# from (FIXME: add ref?)
# horizontalalignment controls whether the x positional argument for the text indicates
# the left, center or right side of the text bounding box.
# verticalalignment controls whether the y positional argument for the text indicates
# the bottom, center or top side of the text bounding box.
# multialignment, for newline separated strings only, controls whether the different lines
# are left, center or right justified
ha = 'left' if hoffset[0] > 0 else 'right' if hoffset[0] < 0 else 'center'
va = 'bottom' if voffset[0] > 0 else 'top' if voffset[0] < 0 else 'center'
ma = ha
kwa = _joinargs("labels", kwargs, fontweight=fontweight, color=fontcolor,
zorder=100, fontsize=fontsize, horizontalalignment=ha,
verticalalignment=va, multialignment=ma)
for name, xpt, ypt in zip(labels, xlbl, ylbl):
# Check if the point can actually be seen with the current bmap
# projection. The bmap object will set the coordinates to very
# large values if it cannot project a point.
if xpt > 1e25:
continue
map_ax.text(xpt, ypt, name, **kwa)
# plot points
x, y = bmap(lons, lats)
# store handles to points, and relative labels, if any
leg_handles, leg_labels = [], []
# bmap.scatter accepts all array-like args except markers. Avoid several useless loops
# and do only those for distinct markers:
# unique markers (sorted according to their index in markers, not their value):
mrks = markers[np.sort(np.unique(markers, return_index=True)[1])]
for mrk in mrks:
# Note using masks with '==' (numpy==1.11.3):
#
# >>> a = np.array([1,2,3])
# >>> a == 3
# array([False, False, True], dtype=bool) # OK
# >>> a == None
# False # NOT AS EXPECTED!
# >>> np.equal(a, None)
# array([False, False, False], dtype=bool) # OK
#
# (Note also that a == None issues:
# FutureWarning: comparison to `None` will result in an elementwise object
# comparison in the future.)
#
# So the correct way is to write
# mask = np.equal(array, val) if val is None else (a == val)
m_mask = np.equal(markers, mrk) if mrk is None else markers == mrk # see above
__x = x[m_mask]
__y = y[m_mask]
__m = mrk
__s = sizes[m_mask]
__c = colors[m_mask]
__l = legendlabels[m_mask]
# unique legends (sorted according to their index in __l, not their value):
for leg in __l[np.sort(np.unique(__l, return_index=True)[1])]:
l_mask = np.equal(__l, leg) if leg is None else __l == leg # see above
_scatter = bmap.scatter(__x[l_mask],
__y[l_mask],
marker=mrk,
s=__s[l_mask],
c=__c[l_mask],
cmap=cmap,
zorder=10)
if leg:
leg_handles.append(_scatter)
leg_labels.append(leg)
if leg_handles:
# if we provided `legend_loc`, use that:
loc = kwargs.get('legend_loc', None)
bbox_to_anchor = None # defaults in matplotlib legend
# we do have legend to show. Adjust legend reference corner:
if loc is None:
if legend_pos == 'bottom':
loc = 'upper center'
bbox_to_anchor = (0.5, -0.05)
elif legend_pos == 'top':
loc = 'lower center'
bbox_to_anchor = (0.5, 1.05)
elif legend_pos == 'left':
loc = 'center right'
bbox_to_anchor = (-0.05, 0.5)
elif legend_pos == 'right':
loc = 'center left'
bbox_to_anchor = (1, 0.5)
else:
raise ValueError('invalid legend_pos value:"%s"' % legend_pos)
# The plt.legend has the prop argument which sets the font properties:
# family, style, variant, weight, stretch, size, fname. See
# http://matplotlib.org/api/font_manager_api.html#matplotlib.font_manager.FontProperties
# However, that property does not allow to set font color. So we
# use the get_text method of Legend. Note that we pass font size *now* even if
# setting it later works as well (the legend frame is resized accordingly)
kwa = _joinargs("legend", kwargs, scatterpoints=1, ncol=legend_ncol, loc=loc,
bbox_to_anchor=bbox_to_anchor, borderaxespad=legend_borderaxespad,
fontsize=fontsize)
# http://stackoverflow.com/questions/17411940/matplotlib-scatter-plot-legend
leg = map_ax.legend(leg_handles, leg_labels, **kwa)
# set properties supplied via 'legend_'
_setprop(leg.get_texts(), _joinargs("legendlabels", kwargs, color=fontcolor))
# re-position the axes. The REAL map aspect ratio seems to be this:
realratio_h_w = bmap.aspect
fig_w, fig_h = fig.get_size_inches()
figratio_h_w = np.true_divide(fig_h, fig_w)
if figratio_h_w >= realratio_h_w:
# we have margins (blank space) above and below
# thus, we assume:
map_w = fig_w
# and we calculate map_h
map_h = map_w * realratio_h_w
# assume there is the same amount of space above and below:
vpad = (fig_h - map_h) / 2.0
# hpad is zero:
hpad = 0
else:
# we have margins (blank space) left and right
# thus, we assume:
map_h = fig_h
# and consequently:
map_w = map_h / realratio_h_w
# assume there is the same amount of space left and right:
hpad = (fig_w - map_w) / 2.0
# vpad is zero:
vpad = 0
# calculate new fig dimensions EXACTLY as contour of the map
new_fig_w = fig_w - 2 * hpad
new_fig_h = fig_h - 2 * vpad
# now margins:
marginz = parse_margins(figmargins) # margins are in fontheight units. Get font height:
fontsize_inch = 0
if len(np.nonzero(marginz)[0]):
# Calculate the font size in pixels.
# We want to be consistent with matplotlib way of getting fontsize.
# inspecting matplotlib.legend.Legend.draw we end up with:
# 1. Get the renderer
rend = fig.canvas.get_renderer()
# 2. get the fontsize in points. We might use `fontsize` but it might be None and we want
# the default in case. There are several 'defaults' (rcParams['font.size'],
# rcParams["legend.fontsize"])... we don't care for now, use the first. How to get
# rcParams['font.size'] ? Either this: (see at matplotlib.Legend.__init__):
# fontsize_pt = FontProperties(size=fontsize, weight=fontweight).get_size_in_points()
# or simply do:
fontsize_pt = fontsize or rcParams['font.size']
# Now use renderer to convert to pixels:
# For info see matplotlib.text.Text.get_window_extent
fontsize_px = rend.points_to_pixels(fontsize_pt)
# finally inches:
fontsize_inch = pix2inch(fontsize_px, fig)  # fontsize_px is already in pixels
# calculate insets in inches (top right bottom left)
insets_inch = marginz * fontsize_inch
# set to fig dimensions
new_fig_w += insets_inch[1] + insets_inch[3]
new_fig_h += insets_inch[0] + insets_inch[2]
fig.set_size_inches(new_fig_w, new_fig_h, forward=True)
# (forward necessary if fig is in GUI, let's set for safety)
# now the axes which are relative to the figure. Thus first normalize inches:
insets_inch /= [fig_h, fig_w, fig_h, fig_w]
# pos1 = map_ax.get_position() # get the original position
# NOTE: it seems that pos[0], pos[1] indicate the x and y of the LOWER LEFT corner, not
# upper left!
pos2 = [insets_inch[3], insets_inch[2],
1 - (insets_inch[1] + insets_inch[3]),
1 - (insets_inch[0] + insets_inch[2])]
map_ax.set_position(pos2)
if title:
plt.suptitle(title)
if show:
plt.show()
return fig
| gpl-3.0 |
PatrickOReilly/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
| bsd-3-clause |
rnowling/pop-gen-models | single-pop/single_pop.py | 1 | 3379 | import sys
import numpy as np
import numpy.random as npr
from sklearn.neighbors.kde import KernelDensity
from scipy.special import gammaln
import matplotlib.pyplot as plt
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
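# --- Added worked example, not part of the original script ---
# log_multinomial uses gammaln to evaluate log[ n! / (x1! ... xk!) * p1^x1 * ... * pk^xk ].
# For xs = [2, 1, 1] and ps = [0.5, 0.25, 0.25]:
#   log(4!) - (log 2! + log 1! + log 1!) + (2*log 0.5 + log 0.25 + log 0.25)
#   = log(12) + log(0.015625) = log(0.1875), i.e. about -1.674
# (the tiny constant added to ps only guards against log(0) and barely changes the result).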
class KDE_MCMC_Sampler(object):
def __init__(self, observed_counts):
"""
Observed counts is a 3D matrix indexed by locus, pop, haplotype
"""
self.observed_counts = observed_counts
self.individual_counts = observed_counts.sum(axis=2)
self.observed_frequencies = normalize_haplotypes(observed_counts)
self.n_loci, self.n_pop, self.n_haplotypes = self.observed_counts.shape
# from bamova
self.DWEIGHT = 1.0
self.DADD = 0.00001
self.SMALL_NUM = 0.0000000000001
print "initializing frequencies"
self.freq = np.zeros((self.n_loci, self.n_haplotypes))
for l in xrange(self.n_loci):
self.freq[l, :] = self.sample_locus_freq(self.observed_frequencies[l, 0, :])
def sample_locus_freq(self, freq):
alphas = self.DWEIGHT * freq + self.DADD + self.SMALL_NUM
return npr.dirichlet(alphas)
def locus_prob(self, locus_obs_counts, locus_freq):
log_prob_sum = 0.0
for p in xrange(self.n_pop):
log_prob_sum += log_multinomial(locus_obs_counts[p], locus_freq)
return log_prob_sum
def step(self):
total_log_prob = 0.0
for l in xrange(self.n_loci):
locus_indiv_counts = self.individual_counts[l, :]
locus_obs_counts = self.observed_counts[l, :, :]
log_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])
proposed_locus_freq = self.sample_locus_freq(self.freq[l, :])
proposed_log_prob = self.locus_prob(locus_obs_counts, proposed_locus_freq)
log_prob_ratio = proposed_log_prob - log_prob
log_r = np.log(npr.random())
if proposed_log_prob >= log_prob or log_r <= log_prob_ratio:
self.freq[l, :] = proposed_locus_freq
log_prob = proposed_log_prob
total_log_prob += log_prob
locus_prob = []
for l in xrange(self.n_loci):
log_prob = self.locus_prob(self.observed_counts[l, :, :], self.freq[l, :])
locus_prob.append(log_prob)
return self.freq, total_log_prob, locus_prob
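# --- Added note, not part of the original script ---
# The acceptance test in step() is a Metropolis-style rule: a proposed frequency vector is always
# kept when its log-probability is higher, otherwise it is kept with probability
# exp(proposed_log_prob - log_prob), which is what `log_r <= log_prob_ratio` implements.
# (A full Metropolis-Hastings acceptance would also include the ratio of the asymmetric Dirichlet
# proposal densities, which this script omits.)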
def plot_log_prob(flname, log_probs):
plt.clf()
plt.hold(True)
plt.hist(log_probs, bins=30)
plt.xlabel("Log Probability", fontsize=16)
plt.xlim([min(log_probs), 0.0])
plt.ylabel("Occurrences (Loci)", fontsize=16)
plt.savefig(flname, dpi=200)
def simulate(occur_fl, n_steps, plot_flname, prob_flname):
print "reading occurrences"
observed_counts = read_counts(occur_fl)
individual_counts = observed_counts.sum(axis=2)
observed_frequencies = normalize_haplotypes(observed_counts)
sampler = KDE_MCMC_Sampler(observed_counts)
fl = open(prob_flname, "w")
locus_log_prob = []
for i in xrange(n_steps):
freq, log_prob, locus_log_prob = sampler.step()
print "step", i, "log prob", log_prob
if i % 100 == 0:
for j, prob in enumerate(locus_log_prob):
fl.write("%s %s %s\n" % (i, j, prob))
fl.close()
plot_log_prob(plot_flname, locus_log_prob)
if __name__ == "__main__":
occur_fl = sys.argv[1]
n_steps = int(sys.argv[2])
plot_flname = sys.argv[3]
prob_flname = sys.argv[4]
simulate(occur_fl, n_steps, plot_flname, prob_flname)
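# --- Added usage sketch, not part of the original script ---
# The __main__ block above expects four positional arguments (the file names here are made up):
#   python single_pop.py haplotype_counts.txt 1000 locus_log_prob_hist.png locus_log_probs.txt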
| apache-2.0 |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_14_evaluation_metrics/evaluate_poi_identifier.py | 1 | 2588 | #!/usr/bin/python
"""
starter code for the evaluation mini-project
start by copying your trained/tested POI identifier
that you built in the validation mini-project
the second step toward building your POI identifier!
start by loading/formatting the data
"""
import pickle
import sys
sys.path.append("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.tree import DecisionTreeClassifier
from sklearn import cross_validation
import numpy as np
data_dict = pickle.load(open("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
features_train,features_test,labels_train,labels_test = cross_validation.train_test_split(features,labels,test_size=0.3,
random_state=42)
clf = DecisionTreeClassifier()
clf.fit(features_train,labels_train)
clf.score(features_test,labels_test)
# How many POIs are in the test set for your POI identifier?
pred = clf.predict(features_test)
sum(pred)
print len([e for e in labels_test if e == 1.0])
# How many people total are in your test set?
len(pred)
# If your identifier predicted 0. (not POI) for everyone in the test set, what would its accuracy be?
1.0 - 5.0/29
# Precision and recall can help illuminate your performance better.
# Use the precision_score and recall_score available in sklearn.metrics to compute those quantities.
# What’s the precision?
from sklearn.metrics import *
precision_score(labels_test, pred)
# What’s the recall?
recall_score(labels_test, pred)
# Here are some made-up predictions and true labels for a hypothetical test set;
# fill in the following boxes to practice identifying true positives, false positives, true negatives, and false negatives.
# Let’s use the convention that “1” signifies a positive result, and “0” a negative.
predictions = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]
true_labels = [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0]
# What's the precision of this classifier?
precision_score(true_labels, predictions)
# What's the recall of this classifier?
recall_score(true_labels, predictions)
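# --- Added worked example, not part of the original starter code ---
# Counting the hand-made predictions/true_labels above: 6 true positives, 3 false positives,
# 2 false negatives and 9 true negatives, so the two calls above should return
# precision = 6 / (6 + 3) ~= 0.667 and recall = 6 / (6 + 2) = 0.75.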
| mit |
ARUNSOORAJPS/flipkart_gridlock | src/main.py | 1 | 2686 | # -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2017-07-08 00:32:09
# @Last Modified by: chandan
# @Last Modified time: 2017-07-08 11:13:46
from data_utils import read_file
from config import DATA_DIR, SCORE_COLUMNS
import os
from model import train_model, test_model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os.path as osp
ACC_FILE = 'RAW_ACCELEROMETERS.txt'
GPS_FILE = 'RAW_GPS.txt'
VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt'
SCORE_FILE = 'SEMANTIC_ONLINE.txt'
def main():
# read acc, gps, veh det for multiple drivers, scenes
X_dfs, Y_dfs = [], []
driver_dir = 'D1'
for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)):
drive_path = osp.join(DATA_DIR, driver_dir, drive_dir)
print drive_path
acc = read_file(osp.join(drive_path, ACC_FILE))
gps = read_file(osp.join(drive_path, GPS_FILE))
veh = read_file(osp.join(drive_path, VEHDET_FILE))
score = read_file(osp.join(drive_path, SCORE_FILE))
datasets = [acc, gps, veh, score]
n_rows = min(map(len, datasets))
# sample high frequency data to lowest frequency
for i in range(len(datasets)):
# drop time column
datasets[i].drop(0, 1, inplace=True)
if len(datasets[i]) > n_rows:
step = len(datasets[i]) / n_rows
ndx = xrange(0, n_rows * step, step)
datasets[i] = datasets[i].ix[ndx]
datasets[i] = datasets[i].reset_index(drop=True)
score_df = datasets[-1]
datasets = datasets[:-1]
Y_df = score_df.ix[:, SCORE_COLUMNS]
# create dataset
X_df = pd.concat(datasets, axis=1, ignore_index=True)
X_df.fillna(0, inplace=True)
print "X:", X_df.shape
print "Y:", score_df.shape
X_dfs.append(X_df)
Y_dfs.append(Y_df)
# preprocess
X_df = pd.concat(X_dfs, ignore_index=True)
X = X_df.values.astype('float32')
Y = pd.concat(Y_dfs, ignore_index=True).values
print "X shape:", X.shape
print "Y shape:", Y.shape
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2)
# train
print "X Train shape:", X_tr.shape
print "Y Train shape:", Y_tr.shape
print "X test shape:", X_ts.shape
print "Y test shape:", Y_ts.shape
seq_len = 16
X_tr_seq = X_to_seq(X_tr, seq_len, 1)
Y_tr = Y_tr[seq_len:]
X_ts_seq = X_to_seq(X_ts, seq_len, 1)
Y_ts = Y_ts[seq_len:]
#train_model(X_tr, Y_tr)
loss = test_model(X_ts_seq, Y_ts)
print loss
def X_to_seq(X, seq_len=16, stride=1):
X_seqs = []
for start_ndx in range(0, len(X) - seq_len, stride):
X_seqs.append(X[start_ndx : start_ndx + seq_len])
return np.array(X_seqs)
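# --- Added illustration, not part of the original script ---
# X_to_seq builds overlapping windows along the first axis: for a hypothetical (100, 8) feature
# matrix and the values used above (seq_len=16, stride=1) it returns an array of shape
# (84, 16, 8); trimming the labels with Y[seq_len:] keeps each window aligned with the label
# that follows it.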
if __name__ == '__main__':
main() | mit |
lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/build_app.py | 9 | 77527 | """
Mac OS X .app build command for distutils
Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller.
"""
from __future__ import print_function
import imp
import sys
import os
import zipfile
import plistlib
import shlex
import shutil
import textwrap
import pkg_resources
import collections
from modulegraph import modulegraph
from py2app.apptemplate.setup import main as script_executable
from py2app.util import mergecopy, make_exec
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules
from modulegraph.modulegraph import SourceModule, Package, Script
from modulegraph import zipio
import macholib.dyld
import macholib.MachOStandalone
import macholib.MachO
from macholib.util import flipwritable
from py2app.create_appbundle import create_appbundle
from py2app.create_pluginbundle import create_pluginbundle
from py2app.util import \
fancy_split, byte_compile, make_loader, imp_find_module, \
copy_tree, fsencoding, strip_files, in_system_path, makedirs, \
iter_platform_files, find_version, skipscm, momc, mapc, copy_file, \
copy_resource
from py2app.filters import \
not_stdlib_filter, not_system_filter, has_filename_filter
from py2app import recipes
from distutils.sysconfig import get_config_var, get_config_h_filename
PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK')
PLUGIN_SUFFIXES = {
'.qlgenerator': 'QuickLook',
'.mdimporter': 'Spotlight',
'.xpc': 'XPCServices',
'.service': 'Services',
'.prefPane': 'PreferencePanes',
'.iaplugin': 'InternetAccounts',
'.action': 'Automator',
}
try:
basestring
except NameError:
basestring = str
def rewrite_tkinter_load_commands(tkinter_path):
print("rewrite_tk", tkinter_path)
m = macholib.MachO.MachO(tkinter_path)
tcl_path = None
tk_path = None
rewrite_map = {}
for header in m.headers:
for idx, name, other in header.walkRelocatables():
if other.endswith('/Tk'):
if tk_path is not None and other != tk_path:
raise DistutilsPlatformError('_tkinter is linked to different Tk paths')
tk_path = other
elif other.endswith('/Tcl'):
if tcl_path is not None and other != tcl_path:
raise DistutilsPlatformError('_tkinter is linked to different Tcl paths')
tcl_path = other
if tcl_path is None or 'Tcl.framework' not in tcl_path:
raise DistutilsPlatformError('_tkinter is not linked against a Tcl.framework')
if tk_path is None or 'Tk.framework' not in tk_path:
raise DistutilsPlatformError('_tkinter is not linked against a Tk.framework')
system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current']
system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current']
if not tcl_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tcl
ver = os.path.basename(os.path.dirname(tcl_path))
if ver not in system_tcl_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System')
rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,)
if not tk_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tk
ver = os.path.basename(os.path.dirname(tk_path))
if ver not in system_tk_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System')
rewrite_map[tk_path] = '/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,)
if rewrite_map:
print("Relinking _tkinter.so to system Tcl/Tk")
rewroteAny = False
for header in m.headers:
for idx, name, other in header.walkRelocatables():
data = rewrite_map.get(other)
if data:
if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())):
rewroteAny = True
if rewroteAny:
old_mode = flipwritable(m.filename)
try:
with open(m.filename, 'rb+') as f:
for header in m.headers:
f.seek(0)
header.write(f)
f.seek(0, 2)
f.flush()
finally:
flipwritable(m.filename, old_mode)
else:
print("_tkinter already linked against system Tcl/Tk")
def get_zipfile(dist, semi_standalone=False):
if sys.version_info[0] == 3:
if semi_standalone:
return "python%d.%d/site-packages.zip"%(sys.version_info[:2])
else:
return "python%d%d.zip"%(sys.version_info[:2])
return getattr(dist, "zipfile", None) or "site-packages.zip"
def framework_copy_condition(src):
# Skip Headers, .svn, and CVS dirs
return skipscm(src) and os.path.basename(src) != 'Headers'
class PythonStandalone(macholib.MachOStandalone.MachOStandalone):
def __init__(self, appbuilder, *args, **kwargs):
super(PythonStandalone, self).__init__(*args, **kwargs)
self.appbuilder = appbuilder
def copy_dylib(self, src):
dest = os.path.join(self.dest, os.path.basename(src))
if os.path.islink(src):
dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src)))
# Ensure that the original name also exists, avoids problems when
# the filename is used from Python (see issue #65)
#
# NOTE: The if statement checks that the target link won't
# point to itself, needed for systems like homebrew that
# store symlinks in "public" locations that point to
# files of the same name in a per-package install location.
link_dest = os.path.join(self.dest, os.path.basename(src))
if os.path.basename(link_dest) != os.path.basename(dest):
os.symlink(os.path.basename(dest), link_dest)
else:
dest = os.path.join(self.dest, os.path.basename(src))
return self.appbuilder.copy_dylib(src, dest)
def copy_framework(self, info):
destfn = self.appbuilder.copy_framework(info, self.dest)
dest = os.path.join(self.dest, info['shortname'] + '.framework')
self.pending.append((destfn, iter_platform_files(dest)))
return destfn
def iterRecipes(module=recipes):
for name in dir(module):
if name.startswith('_'):
continue
check = getattr(getattr(module, name), 'check', None)
if check is not None:
yield (name, check)
# A very loosely defined "target". We assume either a "script" or "modules"
# attribute. Some attributes will be target specific.
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
# If modules is a simple string, assume they meant list
m = self.__dict__.get("modules")
if m and isinstance(m, basestring):
self.modules = [m]
def get_dest_base(self):
dest_base = getattr(self, "dest_base", None)
if dest_base: return dest_base
script = getattr(self, "script", None)
if script:
return os.path.basename(os.path.splitext(script)[0])
modules = getattr(self, "modules", None)
assert modules, "no script, modules or dest_base specified"
return modules[0].split(".")[-1]
def validate(self):
resources = getattr(self, "resources", [])
for r_filename in resources:
if not os.path.isfile(r_filename):
raise DistutilsOptionError(
"Resource filename '%s' does not exist" % (r_filename,))
def validate_target(dist, attr, value):
res = FixupTargets(value, "script")
other = {"app": "plugin", "plugin": "app"}
if res and getattr(dist, other[attr]):
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
def FixupTargets(targets, default_attribute):
if not targets:
return targets
try:
targets = eval(targets)
except:
pass
ret = []
for target_def in targets:
if isinstance(target_def, basestring):
# Create a default target object, with the string as the attribute
target = Target(**{default_attribute: target_def})
else:
d = getattr(target_def, "__dict__", target_def)
if default_attribute not in d:
raise DistutilsOptionError(
"This target class requires an attribute '%s'"
% (default_attribute,))
target = Target(**d)
target.validate()
ret.append(target)
return ret
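# --- Added illustration, not part of the original module ---
# FixupTargets normalizes the user-supplied target list; with a hypothetical script name:
#   FixupTargets(["MyApp.py"], "script") -> [Target(script="MyApp.py")]
#   FixupTargets([{"script": "MyApp.py", "prescripts": []}], "script")
#       -> [Target(script="MyApp.py", prescripts=[])]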
def normalize_data_file(fn):
if isinstance(fn, basestring):
fn = convert_path(fn)
return ('', [fn])
return fn
def is_system():
prefix = sys.prefix
if os.path.exists(os.path.join(prefix, ".Python")):
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
return in_system_path(prefix)
def installation_info(version=None):
if version is None:
version = sys.version
if is_system():
return version[:3] + " (FORCED: Using vendor Python)"
else:
return version[:3]
class py2app(Command):
description = "create a Mac OS X application or plugin from Python scripts"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [
("app=", None,
"application bundle to be built"),
("plugin=", None,
"plugin bundle to be built"),
('optimize=', 'O',
"optimization level: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
("includes=", 'i',
"comma-separated list of modules to include"),
("packages=", 'p',
"comma-separated list of packages to include"),
("iconfile=", None,
"Icon file to use"),
("excludes=", 'e',
"comma-separated list of modules to exclude"),
("dylib-excludes=", 'E',
"comma-separated list of frameworks or dylibs to exclude"),
("datamodels=", None,
"xcdatamodels to be compiled and copied into Resources"),
("mappingmodels=", None,
"xcmappingmodels to be compiled and copied into Resources"),
("resources=", 'r',
"comma-separated list of additional data files and folders to include (not for code!)"),
("frameworks=", 'f',
"comma-separated list of additional frameworks and dylibs to include"),
("plist=", 'P',
"Info.plist template file, dict, or plistlib.Plist"),
("extension=", None,
"Bundle extension [default:.app for app, .plugin for plugin]"),
("graph", 'g',
"output module dependency graph"),
("xref", 'x',
"output module cross-reference as html"),
("no-strip", None,
"do not strip debug and local symbols from output"),
#("compressed", 'c',
# "create a compressed zipfile"),
("no-chdir", 'C',
"do not change to the data directory (Contents/Resources) [forced for plugins]"),
#("no-zip", 'Z',
# "do not use a zip file (XXX)"),
("semi-standalone", 's',
"depend on an existing installation of Python " + installation_info()),
("alias", 'A',
"Use an alias to current source file (for development only!)"),
("argv-emulation", 'a',
"Use argv emulation [disabled for plugins]."),
("argv-inject=", None,
"Inject some commands into the argv"),
("emulate-shell-environment", None,
"Emulate the shell environment you get in a Terminal window"),
("use-pythonpath", None,
"Allow PYTHONPATH to effect the interpreter's environment"),
("use-faulthandler", None,
"Enable the faulthandler in the generated bundle (Python 3.3 or later)"),
("verbose-interpreter", None,
"Start python in verbose mode"),
('bdist-base=', 'b',
'base directory for build library (default is build)'),
('dist-dir=', 'd',
"directory to put final built distributions in (default is dist)"),
('site-packages', None,
"include the system and user site-packages into sys.path"),
("strip", 'S',
"strip debug and local symbols from output (on by default, for compatibility)"),
("prefer-ppc", None,
"Force application to run translated on i386 (LSPrefersPPC=True)"),
('debug-modulegraph', None,
'Drop to pdb console after the module finding phase is complete'),
("debug-skip-macholib", None,
"skip macholib phase (app will not be standalone!)"),
("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"),
("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"),
("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"),
("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"),
("include-plugins=", None, "List of plugins to include"),
("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"),
("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"),
("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"),
]
boolean_options = [
#"compressed",
"xref",
"strip",
"no-strip",
"site-packages",
"semi-standalone",
"alias",
"argv-emulation",
#"no-zip",
"use-pythonpath",
"use-faulthandler",
"verbose-interpreter",
"no-chdir",
"debug-modulegraph",
"debug-skip-macholib",
"graph",
"prefer-ppc",
"emulate-shell-environment",
"force-system-tk",
"report-missing-from-imports",
"no-report-missing-conditional-import",
]
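    # --- Added usage sketch, not part of the original source ---
    # These options are normally driven from a setup.py; a minimal, hypothetical example:
    #
    #   from setuptools import setup
    #   setup(
    #       app=["MyApp.py"],
    #       options={"py2app": {"argv_emulation": True,
    #                           "includes": ["six"],
    #                           "iconfile": "MyApp.icns"}},
    #       setup_requires=["py2app"],
    #   )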
def initialize_options (self):
self.app = None
self.plugin = None
self.bdist_base = None
self.xref = False
self.graph = False
self.no_zip = 0
self.optimize = 0
if hasattr(sys, 'flags'):
self.optimize = sys.flags.optimize
self.arch = None
self.strip = True
self.no_strip = False
self.iconfile = None
self.extension = None
self.alias = 0
self.argv_emulation = 0
self.emulate_shell_environment = 0
self.argv_inject = None
self.no_chdir = 0
self.site_packages = False
self.use_pythonpath = False
self.use_faulthandler = False
self.verbose_interpreter = False
self.includes = None
self.packages = None
self.excludes = None
self.dylib_excludes = None
self.frameworks = None
self.resources = None
self.datamodels = None
self.mappingmodels = None
self.plist = None
self.compressed = True
self.semi_standalone = is_system()
self.dist_dir = None
self.debug_skip_macholib = False
self.debug_modulegraph = False
self.prefer_ppc = False
self.filters = []
self.eggs = []
self.qt_plugins = None
self.matplotlib_backends = None
self.extra_scripts = None
self.include_plugins = None
self.force_system_tk = False
self.report_missing_from_imports = False
self.no_report_missing_conditional_import = False
def finalize_options (self):
if not self.strip:
self.no_strip = True
elif self.no_strip:
self.strip = False
self.optimize = int(self.optimize)
if self.argv_inject and isinstance(self.argv_inject, basestring):
self.argv_inject = shlex.split(self.argv_inject)
self.includes = set(fancy_split(self.includes))
self.includes.add('encodings.*')
if self.use_faulthandler:
self.includes.add('faulthandler')
#if sys.version_info[:2] >= (3, 2):
# self.includes.add('pkgutil')
# self.includes.add('imp')
self.packages = set(fancy_split(self.packages))
self.excludes = set(fancy_split(self.excludes))
self.excludes.add('readline')
# included by apptemplate
self.excludes.add('site')
if getattr(self.distribution, 'install_requires', None):
self.includes.add('pkg_resources')
self.eggs = pkg_resources.require(self.distribution.install_requires)
# Setuptools/distribute style namespace packages uses
# __import__('pkg_resources'), and that import isn't detected at the
# moment. Forcefully include pkg_resources.
self.includes.add('pkg_resources')
dylib_excludes = fancy_split(self.dylib_excludes)
self.dylib_excludes = []
for fn in dylib_excludes:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
try:
res = macholib.dyld.dyld_find(fn)
except ValueError:
res = fn
self.dylib_excludes.append(res)
self.resources = fancy_split(self.resources)
frameworks = fancy_split(self.frameworks)
self.frameworks = []
for fn in frameworks:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
res = macholib.dyld.dyld_find(fn)
while res in self.dylib_excludes:
self.dylib_excludes.remove(res)
self.frameworks.append(res)
if not self.plist:
self.plist = {}
if isinstance(self.plist, basestring):
self.plist = plistlib.Plist.fromFile(self.plist)
if isinstance(self.plist, plistlib.Dict):
self.plist = dict(self.plist.__dict__)
else:
self.plist = dict(self.plist)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('bdist_base', 'bdist_base'))
if self.semi_standalone:
self.filters.append(not_stdlib_filter)
if self.iconfile is None and 'CFBundleIconFile' not in self.plist:
# Default is the generic applet icon in the framework
iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app',
'Contents', 'Resources', 'PythonApplet.icns')
if os.path.exists(iconfile):
self.iconfile = iconfile
self.runtime_preferences = list(self.get_runtime_preferences())
self.qt_plugins = fancy_split(self.qt_plugins)
self.matplotlib_backends = fancy_split(self.matplotlib_backends)
self.extra_scripts = fancy_split(self.extra_scripts)
self.include_plugins = fancy_split(self.include_plugins)
if self.datamodels:
print("WARNING: the datamodels option is deprecated, add model files to the list of resources")
if self.mappingmodels:
print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources")
def get_default_plist(self):
# XXX - this is all single target stuff
plist = {}
target = self.targets[0]
version = self.distribution.get_version()
if version == '0.0.0':
try:
version = find_version(target.script)
except ValueError:
pass
if not isinstance(version, basestring):
raise DistutilsOptionError("Version must be a string")
if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))):
raise DistutilsOptionError("Version must be a string")
plist['CFBundleVersion'] = version
name = self.distribution.get_name()
if name == 'UNKNOWN':
base = target.get_dest_base()
name = os.path.basename(base)
plist['CFBundleName'] = name
return plist
def get_runtime(self, prefix=None, version=None):
# XXX - this is a bit of a hack!
# ideally we'd use dylib functions to figure this out
if prefix is None:
prefix = sys.prefix
if version is None:
version = sys.version
version = version[:3]
info = None
if os.path.exists(os.path.join(prefix, ".Python")):
# We're in a virtualenv environment, locate the real prefix
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
try:
fmwk = macholib.dyld.framework_find(prefix)
except ValueError:
info = None
else:
info = macholib.dyld.framework_info(fmwk)
if info is not None:
dylib = info['name']
runtime = os.path.join(info['location'], info['name'])
else:
dylib = 'libpython%s.dylib' % (sys.version[:3],)
runtime = os.path.join(prefix, 'lib', dylib)
return dylib, runtime
def symlink(self, src, dst):
try:
os.remove(dst)
except OSError:
pass
os.symlink(src, dst)
def get_runtime_preferences(self, prefix=None, version=None):
dylib, runtime = self.get_runtime(prefix=prefix, version=version)
yield os.path.join('@executable_path', '..', 'Frameworks', dylib)
if self.semi_standalone or self.alias:
yield runtime
def run(self):
if get_config_var('PYTHONFRAMEWORK') is None:
if not get_config_var('Py_ENABLE_SHARED'):
raise DistutilsPlatformError("This python does not have a shared library or framework")
else:
# Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared
# library build did not read the application environment correctly. The collection of
# if statements below gives a clean error message when py2app is started, instead of
# building a bundle that will give a confusing error message when started.
msg = "py2app is not supported for a shared library build with this version of python"
if sys.version_info[:2] < (2,7):
raise DistutilsPlatformError(msg)
elif sys.version_info[:2] == (2,7) and sys.version_info[2] < 4:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] < 2:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[2] < 3:
raise DistutilsPlatformError(msg)
elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[2] < 1:
raise DistutilsPlatformError(msg)
if hasattr(self.distribution, "install_requires") \
and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
build = self.reinitialize_command('build')
build.build_base = self.bdist_base
build.run()
self.create_directories()
self.fixup_distribution()
self.initialize_plist()
sys_old_path = sys.path[:]
extra_paths = [
os.path.dirname(target.script)
for target in self.targets
]
extra_paths.extend([build.build_platlib, build.build_lib])
self.additional_paths = [
os.path.abspath(p)
for p in extra_paths
if p is not None
]
sys.path[:0] = self.additional_paths
# this needs additional_paths
self.initialize_prescripts()
try:
self._run()
finally:
sys.path = sys_old_path
def iter_datamodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcdatamodel':
basefn = fn
fn += '.xcdatamodel'
destfn = os.path.basename(basefn) + '.mom'
yield fn, os.path.join(resdir, path, destfn)
def compile_datamodels(self, resdir):
for src, dest in self.iter_datamodels(resdir):
print("compile datamodel", src, "->", dest)
self.mkpath(os.path.dirname(dest))
momc(src, dest)
def iter_mappingmodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcmappingmodel':
basefn = fn
fn += '.xcmappingmodel'
destfn = os.path.basename(basefn) + '.cdm'
yield fn, os.path.join(resdir, path, destfn)
def compile_mappingmodels(self, resdir):
for src, dest in self.iter_mappingmodels(resdir):
self.mkpath(os.path.dirname(dest))
mapc(src, dest)
def iter_extra_plugins(self):
for item in self.include_plugins:
if isinstance(item, (list, tuple)):
subdir, path = item
else:
ext = os.path.splitext(item)[1]
try:
subdir = PLUGIN_SUFFIXES[ext]
path = item
except KeyError:
raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,))
yield path, os.path.join(subdir, os.path.basename(path))
def iter_data_files(self):
dist = self.distribution
allres = chain(getattr(dist, 'data_files', ()) or (), self.resources)
for (path, files) in (normalize_data_file(fn) for fn in allres):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
yield fn, os.path.join(path, os.path.basename(fn))
def collect_scripts(self):
# these contains file names
scripts = set()
for target in self.targets:
scripts.add(target.script)
scripts.update([
k for k in target.prescripts if isinstance(k, basestring)
])
if hasattr(target, 'extra_scripts'):
scripts.update(target.extra_scripts)
scripts.update(self.extra_scripts)
return scripts
def get_plist_options(self):
result = dict(
PyOptions=dict(
use_pythonpath=bool(self.use_pythonpath),
site_packages=bool(self.site_packages),
alias=bool(self.alias),
argv_emulation=bool(self.argv_emulation),
emulate_shell_environment=bool(self.emulate_shell_environment),
no_chdir=bool(self.no_chdir),
prefer_ppc=self.prefer_ppc,
verbose=self.verbose_interpreter,
use_faulthandler=self.use_faulthandler,
),
)
if self.optimize:
result['PyOptions']['optimize'] = self.optimize
return result
def initialize_plist(self):
plist = self.get_default_plist()
for target in self.targets:
plist.update(getattr(target, 'plist', {}))
plist.update(self.plist)
plist.update(self.get_plist_options())
if self.iconfile:
iconfile = self.iconfile
if not os.path.exists(iconfile):
iconfile = iconfile + '.icns'
if not os.path.exists(iconfile):
raise DistutilsOptionError("icon file must exist: %r"
% (self.iconfile,))
self.resources.append(iconfile)
plist['CFBundleIconFile'] = os.path.basename(iconfile)
if self.prefer_ppc:
plist['LSPrefersPPC'] = True
self.plist = plist
return plist
def run_alias(self):
self.app_files = []
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_alias_executable(target, target.script, extra_scripts)
self.app_files.append(dst)
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
def collect_recipedict(self):
return dict(iterRecipes())
def get_modulefinder(self):
if self.debug_modulegraph:
debug = 4
else:
debug = 0
return find_modules(
scripts=self.collect_scripts(),
includes=self.includes,
packages=self.packages,
excludes=self.excludes,
debug=debug,
)
def collect_filters(self):
return [has_filename_filter] + list(self.filters)
def process_recipes(self, mf, filters, flatpackages, loader_files):
rdict = self.collect_recipedict()
while True:
for name, check in rdict.items():
rval = check(self, mf)
if rval is None:
continue
# we can pull this off so long as we stop the iter
del rdict[name]
print('*** using recipe: %s ***' % (name,))
if rval.get('packages'):
self.packages.update(rval['packages'])
find_needed_modules(mf, packages=rval['packages'])
for pkg in rval.get('flatpackages', ()):
if isinstance(pkg, basestring):
pkg = (os.path.basename(pkg), pkg)
flatpackages[pkg[0]] = pkg[1]
filters.extend(rval.get('filters', ()))
loader_files.extend(rval.get('loader_files', ()))
newbootstraps = list(map(self.get_bootstrap,
rval.get('prescripts', ())))
if rval.get('includes'):
find_needed_modules(mf, includes=rval['includes'])
if rval.get('resources'):
self.resources.extend(rval['resources'])
for fn in newbootstraps:
if isinstance(fn, basestring):
mf.run_script(fn)
for target in self.targets:
target.prescripts.extend(newbootstraps)
break
else:
break
def _run(self):
try:
if self.alias:
self.run_alias()
else:
self.run_normal()
except:
raise
# XXX - remove when not debugging
# distutils sucks
import pdb, sys, traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
print("Done!")
def filter_dependencies(self, mf, filters):
print("*** filtering dependencies ***")
nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters)
print('%d total' % (nodes_seen,))
print('%d filtered' % (nodes_removed,))
print('%d orphaned' % (nodes_orphaned,))
print('%d remaining' % (nodes_seen - nodes_removed,))
def get_appname(self):
return self.plist['CFBundleName']
def build_xref(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.html')
print("*** creating dependency html: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.create_xref(fp)
def build_graph(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.dot')
print("*** creating dependency graph: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.graphreport(fp, flatpackages=flatpackages)
def finalize_modulefinder(self, mf):
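        # Namespace packages appear in the graph with filename '-'; on
        # Python <= 3.3 give them a real (empty) __init__.py so they can be
        # copied into the bundle like ordinary packages.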
for item in mf.flatten():
if isinstance(item, Package) and item.filename == '-':
if sys.version_info[:2] <= (3,3):
fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py')
if not os.path.exists(fn):
dn = os.path.dirname(fn)
if not os.path.exists(dn):
os.makedirs(dn)
with open(fn, 'w') as fp:
pass
item.filename = fn
py_files, extensions = parse_mf_results(mf)
# Remove all top-level scripts from the list of python files,
# those get treated differently.
py_files = [ item for item in py_files if not isinstance(item, Script) ]
extensions = list(extensions)
return py_files, extensions
def collect_packagedirs(self):
return list(filter(os.path.exists, [
os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '')
for pkg in self.packages
]))
def run_normal(self):
mf = self.get_modulefinder()
filters = self.collect_filters()
flatpackages = {}
loader_files = []
self.process_recipes(mf, filters, flatpackages, loader_files)
if self.debug_modulegraph:
import pdb
pdb.Pdb().set_trace()
self.filter_dependencies(mf, filters)
if self.graph:
self.build_graph(mf, flatpackages)
if self.xref:
self.build_xref(mf, flatpackages)
py_files, extensions = self.finalize_modulefinder(mf)
pkgdirs = self.collect_packagedirs()
self.create_binaries(py_files, pkgdirs, extensions, loader_files)
missing = []
syntax_error = []
invalid_bytecode = []
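        # Walk the module graph and classify problem nodes: modules that could
        # not be found, modules with syntax errors, and modules whose bytecode
        # could not be loaded.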
for module in mf.nodes():
if isinstance(module, modulegraph.MissingModule):
if module.identifier != '__main__':
missing.append(module)
elif isinstance(module, modulegraph.InvalidSourceModule):
syntax_error.append(module)
elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule):
invalid_bytecode.append(module)
if missing:
missing_unconditional = collections.defaultdict(set)
missing_fromimport = collections.defaultdict(set)
missing_fromimport_conditional = collections.defaultdict(set)
missing_conditional = collections.defaultdict(set)
for module in sorted(missing):
for m in mf.getReferers(module):
if m is None: continue # XXX
try:
ed = mf.edgeData(m, module)
except KeyError:
ed = None
if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo):
c = missing_unconditional
if ed.conditional or ed.function:
if ed.fromlist:
c = missing_fromimport_conditional
else:
c = missing_conditional
elif ed.fromlist:
c = missing_fromimport
c[module.identifier].add(m.identifier)
else:
missing_unconditional[module.identifier].add(m.identifier)
if missing_unconditional:
log.warn("Modules not found (unconditional imports):")
for m in sorted(missing_unconditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m]))))
log.warn("")
if missing_conditional and not self.no_report_missing_conditional_import:
log.warn("Modules not found (conditional imports):")
for m in sorted(missing_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m]))))
log.warn("")
if self.report_missing_from_imports and (
missing_fromimport or (
not self.no_report_missing_conditional_import and missing_fromimport_conditional)):
log.warn("Modules not found ('from ... import y'):")
for m in sorted(missing_fromimport):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m]))))
if not self.no_report_missing_conditional_import and missing_fromimport_conditional:
log.warn("")
log.warn("Conditional:")
for m in sorted(missing_fromimport_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m]))))
log.warn("")
if syntax_error:
log.warn("Modules with syntax errors:")
for module in sorted(syntax_error):
log.warn(" * %s"%(module.identifier))
log.warn("")
if invalid_bytecode:
log.warn("Modules with invalid bytecode:")
for module in sorted(invalid_bytecode):
log.warn(" * %s"%(module.identifier))
log.warn("")
def create_directories(self):
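        # Lay out the build tree: the per-flavour bdist dir plus collect/,
        # temp/, the dist dir, the zip/lib dir, lib-dynload/ and Frameworks/.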
bdist_base = self.bdist_base
if self.semi_standalone:
self.bdist_dir = os.path.join(bdist_base,
'python%s-semi_standalone' % (sys.version[:3],), 'app')
else:
self.bdist_dir = os.path.join(bdist_base,
'python%s-standalone' % (sys.version[:3],), 'app')
if os.path.exists(self.bdist_dir):
shutil.rmtree(self.bdist_dir)
self.collect_dir = os.path.abspath(
os.path.join(self.bdist_dir, "collect"))
self.mkpath(self.collect_dir)
self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp"))
self.mkpath(self.temp_dir)
self.dist_dir = os.path.abspath(self.dist_dir)
self.mkpath(self.dist_dir)
self.lib_dir = os.path.join(self.bdist_dir,
os.path.dirname(get_zipfile(self.distribution, self.semi_standalone)))
self.mkpath(self.lib_dir)
self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload')
self.mkpath(self.ext_dir)
self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks')
self.mkpath(self.framework_dir)
def create_binaries(self, py_files, pkgdirs, extensions, loader_files):
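        # Byte-compile the collected modules, zip them into the shared
        # archive, then assemble each target's bundle: copy in the Python
        # executable, extensions and data files, and (for standalone builds)
        # rewrite Mach-O dependencies with macholib.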
print("*** create binaries ***")
dist = self.distribution
pkgexts = []
copyexts = []
extmap = {}
def packagefilter(mod, pkgdirs=pkgdirs):
            fn = getattr(mod, 'filename', None)
            if fn is None:
                return None
            fn = os.path.realpath(fn)
for pkgdir in pkgdirs:
if fn.startswith(pkgdir):
return None
return fn
if pkgdirs:
py_files = list(filter(packagefilter, py_files))
for ext in extensions:
fn = packagefilter(ext)
if fn is None:
fn = os.path.realpath(getattr(ext, 'filename', None))
pkgexts.append(ext)
else:
if '.' in ext.identifier:
py_files.append(self.create_loader(ext))
copyexts.append(ext)
extmap[fn] = ext
# byte compile the python modules into the target directory
print("*** byte compile python files ***")
byte_compile(py_files,
target_dir=self.collect_dir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
for item in py_files:
if not isinstance(item, Package): continue
self.copy_package_data(item, self.collect_dir)
self.lib_files = []
self.app_files = []
# create the shared zipfile containing all Python modules
archive_name = os.path.join(self.lib_dir,
get_zipfile(dist, self.semi_standalone))
for path, files in loader_files:
dest = os.path.join(self.collect_dir, path)
self.mkpath(dest)
for fn in files:
destfn = os.path.join(dest, os.path.basename(fn))
if os.path.isdir(fn):
self.copy_tree(fn, destfn, preserve_symlinks=False)
else:
self.copy_file(fn, destfn)
arcname = self.make_lib_archive(archive_name,
base_dir=self.collect_dir, verbose=self.verbose,
dry_run=self.dry_run)
# XXX: this doesn't work with python3
#self.lib_files.append(arcname)
# build the executables
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_executable(
target, arcname, pkgexts, copyexts, target.script, extra_scripts)
exp = os.path.join(dst, 'Contents', 'MacOS')
execdst = os.path.join(exp, 'python')
if self.semi_standalone:
self.symlink(sys.executable, execdst)
else:
if os.path.exists(os.path.join(sys.prefix, ".Python")):
fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:]
if rest_path.startswith('.'):
rest_path = rest_path[1:]
if PYTHONFRAMEWORK:
                        # When we're using a python framework, bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(os.path.join(prefix, rest_path), execdst)
else:
if PYTHONFRAMEWORK:
                        # When we're using a python framework, bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(sys.executable, execdst)
if not self.debug_skip_macholib:
if self.force_system_tk:
print("force system tk")
resdir = os.path.join(dst, 'Contents', 'Resources')
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
tkinter_path = os.path.join(ext_dir, '_tkinter.so')
if os.path.exists(tkinter_path):
rewrite_tkinter_load_commands(tkinter_path)
else:
print("tkinter not found at", tkinter_path)
mm = PythonStandalone(self, dst, executable_path=exp)
dylib, runtime = self.get_runtime()
if self.semi_standalone:
mm.excludes.append(runtime)
else:
mm.mm.run_file(runtime)
for exclude in self.dylib_excludes:
info = macholib.dyld.framework_info(exclude)
if info is not None:
exclude = os.path.join(
info['location'], info['shortname'] + '.framework')
mm.excludes.append(exclude)
for fmwk in self.frameworks:
mm.mm.run_file(fmwk)
platfiles = mm.run()
if self.strip:
platfiles = self.strip_dsym(platfiles)
self.strip_files(platfiles)
self.app_files.append(dst)
def copy_package_data(self, package, target_dir):
"""
Copy any package data in a python package into the target_dir.
This is a bit of a hack, it would be better to identify python eggs
and copy those in whole.
"""
exts = [ i[0] for i in imp.get_suffixes() ]
exts.append('.py')
exts.append('.pyc')
exts.append('.pyo')
def datafilter(item):
for e in exts:
if item.endswith(e):
return False
return True
target_dir = os.path.join(target_dir, *(package.identifier.split('.')))
for dname in package.packagepath:
filenames = list(filter(datafilter, zipio.listdir(dname)))
for fname in filenames:
if fname in ('.svn', 'CVS', '.hg', '.git'):
# Scrub revision manager junk
continue
if fname in ('__pycache__',):
# Ignore PEP 3147 bytecode cache
continue
if fname.startswith('.') and fname.endswith('.swp'):
# Ignore vim(1) temporary files
continue
if fname.endswith('~') or fname.endswith('.orig'):
# Ignore backup files for common tools (hg, emacs, ...)
continue
pth = os.path.join(dname, fname)
# Check if we have found a package, exclude those
if zipio.isdir(pth):
# XXX: the 'and not' part is wrong, need to fix zipio.isdir
for p in zipio.listdir(pth):
if p.startswith('__init__.') and p[8:] in exts:
break
else:
if os.path.isfile(pth):
# Avoid extracting a resource file that happens
                            # to be a zipfile.
# XXX: Need API in zipio for nicer code.
copy_file(pth, os.path.join(target_dir, fname))
else:
copy_tree(pth, os.path.join(target_dir, fname))
continue
elif zipio.isdir(pth) and (
zipio.isfile(os.path.join(pth, '__init__.py'))
or zipio.isfile(os.path.join(pth, '__init__.pyc'))
or zipio.isfile(os.path.join(pth, '__init__.pyo'))):
# Subdirectory is a python package, these will get included later on
# when the subpackage itself is included, ignore for now.
pass
else:
copy_file(pth, os.path.join(target_dir, fname))
def strip_dsym(self, platfiles):
""" Remove .dSYM directories in the bundled application """
#
        # .dSYM directories contain detached debugging information and
# should be completely removed when the "strip" option is specified.
#
if self.dry_run:
return platfiles
for dirpath, dnames, fnames in os.walk(self.appdir):
for nm in list(dnames):
if nm.endswith('.dSYM'):
print("removing debug info: %s/%s"%(dirpath, nm))
shutil.rmtree(os.path.join(dirpath, nm))
dnames.remove(nm)
return [file for file in platfiles if '.dSYM' not in file]
def strip_files(self, files):
unstripped = 0
stripfiles = []
for fn in files:
unstripped += os.stat(fn).st_size
stripfiles.append(fn)
log.info('stripping %s', os.path.basename(fn))
strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose)
stripped = 0
for fn in stripfiles:
stripped += os.stat(fn).st_size
log.info('stripping saved %d bytes (%d / %d)',
unstripped - stripped, stripped, unstripped)
def copy_dylib(self, src, dst):
# will be copied from the framework?
if src != sys.executable:
force, self.force = self.force, True
self.copy_file(src, dst)
self.force = force
return dst
def copy_versioned_framework(self, info, dst):
# XXX - Boy is this ugly, but it makes sense because the developer
# could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc.
# Saves a good deal of space, and I'm pretty sure this ugly
# hack is correct in the general case.
version = info['version']
if version is None:
return self.raw_copy_framework(info, dst)
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
vsplit = os.path.join(infile, 'Versions').split(os.sep)
def condition(src, vsplit=vsplit, version=version):
srcsplit = src.split(os.sep)
if (
len(srcsplit) > len(vsplit) and
srcsplit[:len(vsplit)] == vsplit and
srcsplit[len(vsplit)] != version and
not os.path.islink(src)
):
return False
# Skip Headers, .svn, and CVS dirs
return framework_copy_condition(src)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=condition)
def copy_framework(self, info, dst):
force, self.force = self.force, True
if info['shortname'] == PYTHONFRAMEWORK:
self.copy_python_framework(info, dst)
else:
self.copy_versioned_framework(info, dst)
self.force = force
return os.path.join(dst, info['name'])
def raw_copy_framework(self, info, dst):
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=framework_copy_condition)
def copy_python_framework(self, info, dst):
# XXX - In this particular case we know exactly what we can
# get away with.. should this be extended to the general
# case? Per-framework recipes?
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
indir = os.path.dirname(os.path.join(info['location'], info['name']))
outdir = os.path.dirname(os.path.join(dst, info['name']))
self.mkpath(os.path.join(outdir, 'Resources'))
pydir = 'python%s.%s'%(sys.version_info[:2])
# Create a symlink "for Python.frameworks/Versions/Current". This
# is required for the Mac App-store.
os.symlink(
os.path.basename(outdir),
os.path.join(os.path.dirname(outdir), "Current"))
# Likewise for two links in the root of the framework:
os.symlink(
'Versions/Current/Resources',
os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources'))
os.symlink(
os.path.join('Versions/Current', PYTHONFRAMEWORK),
os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK))
# Experiment for issue 57
if not os.path.exists(os.path.join(indir, 'include')):
alt = os.path.join(indir, 'Versions/Current')
if os.path.exists(os.path.join(alt, 'include')):
indir = alt
# distutils looks for some files relative to sys.executable, which
# means they have to be in the framework...
self.mkpath(os.path.join(outdir, 'include'))
self.mkpath(os.path.join(outdir, 'include', includedir))
self.mkpath(os.path.join(outdir, 'lib'))
self.mkpath(os.path.join(outdir, 'lib', pydir))
self.mkpath(os.path.join(outdir, 'lib', pydir, configdir))
fmwkfiles = [
os.path.basename(info['name']),
'Resources/Info.plist',
'include/%s/pyconfig.h'%(includedir),
]
if '_sysconfigdata' not in sys.modules:
fmwkfiles.append(
'lib/%s/%s/Makefile'%(pydir, configdir)
)
for fn in fmwkfiles:
self.copy_file(
os.path.join(indir, fn),
os.path.join(outdir, fn))
def fixup_distribution(self):
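        # Normalize the app/plugin settings into target objects and validate
        # that exactly one style with a single target was requested.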
dist = self.distribution
# Trying to obtain app and plugin from dist for backward compatibility
# reasons.
app = dist.app
plugin = dist.plugin
# If we can get suitable values from self.app and self.plugin, we prefer
# them.
if self.app is not None or self.plugin is not None:
app = self.app
plugin = self.plugin
# Convert our args into target objects.
dist.app = FixupTargets(app, "script")
dist.plugin = FixupTargets(plugin, "script")
if dist.app and dist.plugin:
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
elif dist.app:
self.style = 'app'
self.targets = dist.app
elif dist.plugin:
self.style = 'plugin'
self.targets = dist.plugin
else:
raise DistutilsOptionError(
"You must specify either app or plugin")
if len(self.targets) != 1:
# XXX - support multiple targets?
raise DistutilsOptionError(
"Multiple targets not currently supported")
if not self.extension:
self.extension = '.' + self.style
# make sure all targets use the same directory, this is
# also the directory where the pythonXX.dylib must reside
paths = set()
for target in self.targets:
paths.add(os.path.dirname(target.get_dest_base()))
if len(paths) > 1:
raise DistutilsOptionError(
"all targets must use the same directory: %s" %
([p for p in paths],))
if paths:
app_dir = paths.pop() # the only element
if os.path.isabs(app_dir):
raise DistutilsOptionError(
"app directory must be relative: %s" % (app_dir,))
self.app_dir = os.path.join(self.dist_dir, app_dir)
self.mkpath(self.app_dir)
else:
# Do we allow to specify no targets?
# We can at least build a zipfile...
self.app_dir = self.lib_dir
def initialize_prescripts(self):
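        # Collect the bootstrap prescripts that run inside the bundle before
        # the user's script: sys.path fixes, virtualenv support, argv
        # emulation, chdir to Resources, and the boot script itself.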
prescripts = []
prescripts.append('reset_sys_path')
if self.semi_standalone:
prescripts.append('semi_standalone_path')
if 0 and sys.version_info[:2] >= (3, 2) and not self.alias:
# Python 3.2 or later requires a more complicated
# bootstrap
prescripts.append('import_encodings')
if os.path.exists(os.path.join(sys.prefix, ".Python")):
# We're in a virtualenv, which means sys.path
# will be broken in alias builds unless we fix
# it.
if self.alias or self.semi_standalone:
prescripts.append("virtualenv")
prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,)))
if self.site_packages or self.alias:
import site
global_site_packages = not os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt'))
prescripts.append('virtualenv_site_packages')
prescripts.append(StringIO('_site_packages(%r, %r, %d)' % (
sys.prefix, sys.real_prefix, global_site_packages)))
elif self.site_packages or self.alias:
prescripts.append('site_packages')
if is_system():
prescripts.append('system_path_extras')
#if self.style == 'app':
# prescripts.append('setup_pkgresource')
included_subpkg = [pkg for pkg in self.packages if '.' in pkg]
if included_subpkg:
prescripts.append('setup_included_subpackages')
prescripts.append(StringIO('_path_hooks = %r'%(
included_subpkg)))
if self.emulate_shell_environment:
prescripts.append('emulate_shell_environment')
if self.argv_emulation and self.style == 'app':
prescripts.append('argv_emulation')
if 'CFBundleDocumentTypes' not in self.plist:
self.plist['CFBundleDocumentTypes'] = [
{
'CFBundleTypeOSTypes' : [
'****',
'fold',
'disk',
],
'CFBundleTypeRole': 'Viewer'
},
]
if self.argv_inject is not None:
prescripts.append('argv_inject')
prescripts.append(
StringIO('_argv_inject(%r)\n' % (self.argv_inject,)))
if self.style == 'app' and not self.no_chdir:
prescripts.append('chdir_resource')
if not self.alias:
prescripts.append('disable_linecache')
prescripts.append('boot_' + self.style)
else:
# Add ctypes prescript because it is needed to
# find libraries in the bundle, but we don't run
# recipes and hence the ctypes recipe is not used
# for alias builds.
prescripts.append('ctypes_setup')
if self.additional_paths:
prescripts.append('path_inject')
prescripts.append(
StringIO('_path_inject(%r)\n' % (self.additional_paths,)))
prescripts.append('boot_alias' + self.style)
newprescripts = []
for s in prescripts:
if isinstance(s, basestring):
newprescripts.append(
self.get_bootstrap('py2app.bootstrap.' + s))
else:
newprescripts.append(s)
for target in self.targets:
prescripts = getattr(target, 'prescripts', [])
target.prescripts = newprescripts + prescripts
def get_bootstrap(self, bootstrap):
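        # A bootstrap is either a file-like object, an existing file path, or
        # a dotted module name that is resolved to its file with
        # imp_find_module.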
if isinstance(bootstrap, basestring):
if not os.path.exists(bootstrap):
bootstrap = imp_find_module(bootstrap)[1]
return bootstrap
def get_bootstrap_data(self, bootstrap):
bootstrap = self.get_bootstrap(bootstrap)
if not isinstance(bootstrap, basestring):
return bootstrap.getvalue()
else:
with open(bootstrap, 'rU') as fp:
return fp.read()
def create_pluginbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating plugin bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
appdir, plist = create_pluginbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_appbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating application bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
pythonInfo = self.plist.setdefault('PythonInfoDict', {})
py2appInfo = pythonInfo.setdefault('py2app', {}).update(dict(
alias=bool(self.alias),
))
appdir, plist = create_appbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_bundle(self, target, script, use_runtime_preference=True):
fn = getattr(self, 'create_%sbundle' % (self.style,))
return fn(
target,
script,
use_runtime_preference=use_runtime_preference
)
def iter_frameworks(self):
for fn in self.frameworks:
fmwk = macholib.dyld.framework_info(fn)
if fmwk is None:
yield fn
else:
basename = fmwk['shortname'] + '.framework'
yield os.path.join(fmwk['location'], basename)
def build_alias_executable(self, target, script, extra_scripts):
# Build an alias executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
# symlink python executable
execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python')
prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python')
if os.path.exists(prefixPathExecutable):
pyExecutable = prefixPathExecutable
else:
pyExecutable = sys.executable
self.symlink(pyExecutable, execdst)
# make PYTHONHOME
pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
makedirs(pyhome)
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc'))
self.symlink(
os.path.join(realhome, 'config'),
os.path.join(pyhome, 'config'))
# symlink data files
# XXX: fixme: need to integrate automatic data conversion
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run, symlink=1)
except:
import traceback
traceback.print_exc()
raise
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run)
except:
import traceback
traceback.print_exc()
raise
# symlink frameworks
for src in self.iter_frameworks():
dest = os.path.join(
appdir, 'Contents', 'Frameworks', os.path.basename(src))
if src == dest:
continue
makedirs(os.path.dirname(dest))
self.symlink(os.path.abspath(src), dest)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),))
script_map = {}
for fn in extra_scripts:
tgt = os.path.realpath(fn)
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = tgt
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = tgt
else:
script_map[fn] = tgt
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('try:\n')
bootfile.write(' _run()\n')
bootfile.write('except KeyboardInterrupt:\n')
bootfile.write(' pass\n')
bootfile.close()
target.appdir = appdir
return appdir
def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts):
# Build an executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
self.appdir = appdir
self.resdir = resdir
self.plist = plist
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
site_path = os.path.join(resdir, 'site.py')
byte_compile([
SourceModule('site', site_path),
],
target_dir=resdir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
if not self.dry_run:
os.unlink(site_path)
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),))
script_map = {}
for fn in extra_scripts:
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = fn
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = fn
else:
script_map[fn] = fn
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('_run()\n')
bootfile.close()
self.copy_file(script, resdir)
for fn in extra_scripts:
self.copy_file(fn, resdir)
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
if sys.version_info[0] == 2 or self.semi_standalone:
arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
else:
arcdir = os.path.join(resdir, 'lib')
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
self.mkpath(pydir)
        # The site.py file needs to be in two locations
# 1) in lib/pythonX.Y, to be found during normal startup and
# by the 'python' executable
# 2) in the resources directory next to the script for
# semistandalone builds (the lib/pythonX.Y directory is too
# late on sys.path to be found in that case).
#
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc'))
cfgdir = os.path.join(pydir, configdir)
realcfg = os.path.join(realhome, configdir)
real_include = os.path.join(sys.prefix, 'include')
if self.semi_standalone:
self.symlink(realcfg, cfgdir)
self.symlink(real_include, os.path.join(resdir, 'include'))
else:
self.mkpath(cfgdir)
if '_sysconfigdata' not in sys.modules:
# Recent enough versions of Python 2.7 and 3.x have
# an _sysconfigdata module and don't need the Makefile
# to provide the sysconfig data interface. Don't copy
# them.
for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config':
rfn = os.path.join(realcfg, fn)
if os.path.exists(rfn):
self.copy_file(rfn, os.path.join(cfgdir, fn))
inc_dir = os.path.join(resdir, 'include', includedir)
self.mkpath(inc_dir)
self.copy_file(get_config_h_filename(),
os.path.join(inc_dir, 'pyconfig.h'))
self.copy_file(arcname, arcdir)
if sys.version_info[0] != 2:
import zlib
self.copy_file(zlib.__file__, os.path.dirname(arcdir))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True)
self.copy_tree(self.framework_dir,
os.path.join(appdir, 'Contents', 'Frameworks'),
preserve_symlinks=True)
for pkg_name in self.packages:
pkg = self.get_bootstrap(pkg_name)
print('XXXX', pkg_name, pkg)
if self.semi_standalone:
# For semi-standalone builds don't copy packages
# from the stdlib into the app bundle, even when
# they are mentioned in self.packages.
p = Package(pkg_name, pkg)
if not not_stdlib_filter(p):
continue
dst = os.path.join(pydir, pkg_name)
self.mkpath(dst)
self.copy_tree(pkg, dst)
# FIXME: The python files should be bytecompiled
# here (see issue 101)
for copyext in copyexts:
fn = os.path.join(ext_dir,
(copyext.identifier.replace('.', os.sep) +
os.path.splitext(copyext.filename)[1])
)
self.mkpath(os.path.dirname(fn))
copy_file(copyext.filename, fn, dry_run=self.dry_run)
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
target.appdir = appdir
return appdir
def create_loader(self, item):
# Hm, how to avoid needless recreation of this file?
slashname = item.identifier.replace('.', os.sep)
pathname = os.path.join(self.temp_dir, "%s.py" % slashname)
if os.path.exists(pathname):
if self.verbose:
print("skipping python loader for extension %r"
% (item.identifier,))
else:
self.mkpath(os.path.dirname(pathname))
# and what about dry_run?
if self.verbose:
print("creating python loader for extension %r"
% (item.identifier,))
fname = slashname + os.path.splitext(item.filename)[1]
source = make_loader(fname)
if not self.dry_run:
with open(pathname, "w") as fp:
fp.write(source)
else:
return
return SourceModule(item.identifier, pathname)
def make_lib_archive(self, zip_filename, base_dir, verbose=0,
dry_run=0):
# Like distutils "make_archive", except we can specify the
# compression to use - default is ZIP_STORED to keep the
# runtime performance up.
# Also, we don't append '.zip' to the filename.
from distutils.dir_util import mkpath
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
if self.compressed:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=compression)
save_cwd = os.getcwd()
os.chdir(base_dir)
for dirpath, dirnames, filenames in os.walk('.'):
if filenames:
# Ensure that there are directory entries for
# all directories in the zipfile. This is a
# workaround for <http://bugs.python.org/issue14905>:
# zipimport won't consider 'pkg/foo.py' to be in
# namespace package 'pkg' unless there is an
# entry for the directory (or there is a
# pkg/__init__.py file as well)
z.write(dirpath, dirpath)
for fn in filenames:
path = os.path.normpath(os.path.join(dirpath, fn))
if os.path.isfile(path):
z.write(path, path)
os.chdir(save_cwd)
z.close()
return zip_filename
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1, condition=None):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
This version doesn't bork on existing symlinks
"""
return copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run,
condition=condition)
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/frame/test_query_eval.py | 11 | 42389 | # -*- coding: utf-8 -*-
from __future__ import print_function
import operator
import pytest
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import DataFrame, Series, Index, MultiIndex, date_range
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
from pandas.core.computation import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
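# The fixtures below parametrize tests over every parser/engine combination.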
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
if not _NUMEXPR_INSTALLED:
pytest.skip("cannot query engine numexpr when numexpr not "
"installed")
class TestCompat(object):
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query('A>0')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query('A>0', engine=None)
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine=None)
assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query('A>0', engine='python')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='python')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query('A>0', engine='numexpr')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
pytest.raises(ImportError,
lambda: df.query('A>0', engine='numexpr'))
pytest.raises(ImportError,
lambda: df.eval('A+1', engine='numexpr'))
class TestDataFrameEval(TestData):
def test_ops(self):
        # test ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [4, 4000]:
df = DataFrame(1, index=range(n), columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__')]:
base = (DataFrame(np.tile(m.values, n) # noqa
.reshape(n, -1),
columns=list('abcd')))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result, expected)
# these are commutative
                if op_str in ['+', '*']:
result = getattr(df, op)(m)
assert_frame_equal(result, expected)
# these are not
                elif op_str in ['-', '/']:
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1 - np.isnan(df.iloc[0:25]))
result = (1 - np.isnan(df)).iloc[0:25]
assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']})
msg = "expr must be a string to be evaluated"
with tm.assert_raises_regex(ValueError, msg):
df.query(lambda x: x.B == "b")
with tm.assert_raises_regex(ValueError, msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({'A': [1, 2, 3]})
msg = "expr cannot be an empty string"
with tm.assert_raises_regex(ValueError, msg):
df.query('')
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(object):
def test_query_with_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser,
engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser,
engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(object):
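    # Baseline query/eval tests for engine='numexpr', parser='pandas'; the
    # subclasses further down swap in the other engine/parser combinations and
    # override only the tests whose syntax or behaviour differs.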
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne(cls.engine)
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
ops = '==', '!=', '<', '>', '<=', '>='
for op in ops:
with pytest.raises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with pytest.raises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2 # noqa
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assert_raises_regex(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assert_raises_regex(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
assert_frame_equal(result, expected)
df = DataFrame({'index': a,
'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine,
parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
df.loc[::2, 0] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
assert_frame_equal(result, expected)
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
cls.frame = TestData().frame
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval('x + 1', engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPython, cls).setup_class()
cls.engine = cls.parser = 'python'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryStrings(object):
def test_str_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
pytest.raises(NotImplementedError, df.query, ex,
engine=engine, parser=parser,
local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_list_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
tm.skip_if_no_ne(engine)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
tm.skip_if_no_ne(engine=engine)
import operator as opr
a = Series(np.random.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
tm.skip_if_no_ne(engine)
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US' # noqa
r = df.query('Symbol == @symb', parser=parser, engine=engine)
assert_frame_equal(e, r)
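# Illustrative sketch (added commentary, not part of the original test file):
# with the 'pandas' parser, query strings reference columns by bare name and
# local Python variables with the '@' prefix, e.g.
#
#     import pandas as pd
#     df = pd.DataFrame({'Symbol': ['BUD US', 'IBM US'], 'Price': [109.70, 183.30]})
#     threshold = 150.0
#     cheap = df.query('Price < @threshold')   # keeps only the 'BUD US' row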
class TestDataFrameEvalNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne()
def setup_method(self, method):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self):
res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self):
res = self.frame.eval('a[a < 1] + b', engine=self.engine,
parser=self.parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
def test_invalid_type_for_operator_raises(self):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
ops = '+', '-', '*', '/'
for op in ops:
with tm.assert_raises_regex(TypeError,
"unsupported operand type\(s\) "
"for .+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=self.engine,
parser=self.parser)
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameEvalNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameEvalPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
@classmethod
def setup_class(cls):
cls.engine = cls.parser = 'python'
| mit |
karvenka/sp17-i524 | project/S17-IR-P014/code/delay.py | 15 | 5276 | import sys
import csv
import sip
#import org.apache.log4j.{Level, Logger}
import matplotlib
#matplotlib.user('agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from pyspark import SparkContext, SparkConf
from datetime import datetime
from operator import add, itemgetter
from collections import namedtuple
from datetime import datetime
import os
import time
from StringIO import StringIO
#Defining the fields, Creating a Flights class with the following fields as a tuple
#Each row is converted into a list
timestarted = time.time()
fields = ('date', 'airline', 'flightnum', 'origin', 'dest', 'dep',
'dep_delay', 'arv', 'arv_delay', 'airtime', 'distance')
Flight = namedtuple('Flight', fields, verbose=True)
DATE_FMT = "%Y-%m-%d"
TIME_FMT = "%H%M"
# User Defined Functions
def toCSVLine(data):
return ','.join(str(d) for d in data)
def split(line):
reader = csv.reader(StringIO(line))
return reader.next()
def parse(row):
row[0] = datetime.strptime(row[0], DATE_FMT).time()
row[5] = datetime.strptime(row[5], TIME_FMT).time()
row[6] = float(row[6])
row[7] = datetime.strptime(row[7], TIME_FMT).time()
row[8] = float(row[8])
row[9] = float(row[9])
row[10] = float(row[10])
return Flight(*row[:11])
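# Hedged illustration (added comment, not in the original script): for a raw
# record like "2014-04-01,AA,1,JFK,LAX,0900,5.0,1225,7.0,385.0,2475.0",
# split() returns the list of fields and parse() builds a Flight namedtuple,
# converting the departure/arrival fields to datetime.time objects and the
# delay, airtime and distance fields to floats (note that the date column is
# reduced to a time object by the .time() call above).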
def notHeader(row):
return "Description" not in row
def plot(airlinesdelays):
airlines = [d[0] for d in airlinesdelays]
minutes = [d[1] for d in airlinesdelays]
index = list(xrange(len(airlines)))
#Above we retrieved the respective columns from the list
#Here we mention the plot as a horizontal bar plot
fig, axe = plt.subplots()
bars = axe.barh(index, minutes)
# Add the total minutes to the right
for idx, air, min in zip(index, airlines, minutes):
if min > 0:
bars[idx].set_color('#d9230f')
axe.annotate(" %0.0f min" % min, xy=(min+1, idx+0.5), va='center')
else:
bars[idx].set_color('#469408')
axe.annotate(" %0.0f min" % min, xy=(10, idx+0.5), va='center')
# Set the ticks
ticks = plt.yticks([idx+ 0.5 for idx in index], airlines)
xt = plt.xticks()[0]
plt.xticks(xt, [' '] * len(xt))
# minimize chart junk
plt.grid(axis = 'x', color ='white', linestyle='-')
plt.title('Total Minutes Delayed per Airline')
plt.savefig('airlines.png')
#airlines.filter(notHeader).take(10)
#main method is the entry point for the following program
if __name__ == "__main__":
conf = SparkConf().setAppName("average")
sc = SparkContext(conf=conf)
#setting log level to error
# val rootLogger = Logger.getRootLogger()
# rootLogger.setLevel(Level.ERROR)
#importing data from HDFS for performing analysis
airlines = sc.textFile(sys.argv[1])
# airlines = sc.textFile("hdfs://192.168.1.8:8020/fltdata/airlines.csv")
flights = sc.textFile(sys.argv[2])
airports =sc.textFile(sys.argv[3])
airlinesParsed = dict(airlines.map(split).collect())
airportsParsed= airports.filter(notHeader).map(split)
# print "without header and spliting up", airlines.take(10)
# print "without header and spliting up", airlines.take(10)
flightsParsed= flights.map(lambda x: x.split(',')).map(parse)
#print "The average delay is "+str(sumCount[0]/float(sumCount[1]))
airportDelays = flightsParsed.map(lambda x: (x.origin,x.dep_delay))
# First find the total delay per airport
airportTotalDelay=airportDelays.reduceByKey(lambda x,y:x+y)
# Find the count per airport
airportCount=airportDelays.mapValues(lambda x:1).reduceByKey(lambda x,y:x+y)
# Join to have the sum, count in 1 RDD
airportSumCount=airportTotalDelay.join(airportCount)
# Compute avg delay per airport
airportAvgDelay=airportSumCount.mapValues(lambda x : x[0]/float(x[1]))
airportDelay = airportAvgDelay.sortBy(lambda x:-x[1])
print "", airportDelay.take(10)
airportLookup=airportsParsed.collectAsMap()
#airlineLookup=airlinesParsed.collectAsMap()
airline_lookup = sc.broadcast(airlinesParsed)
    airlinesdelays = flightsParsed.map(lambda f: (airline_lookup.value[f.airline], add(f.dep_delay, f.arv_delay)))
    # total the per-airline delays, collect to the driver, then sort by total delay
    delays = airlinesdelays.reduceByKey(add).collect()
    airlinesdelays = sorted(delays, key=itemgetter(1))
#tenairlines = delays.map(toCSVLine)
ten = airportAvgDelay.map(lambda x: (airportLookup[x[0]],x[1]))
#print "", ten.take(10)
for d in airlinesdelays:
print "%0.0f minutes delayed\t%s" % (d[1], d[0])
airportBC=sc.broadcast(airportLookup)
topTenAirportsWithDelays = airportAvgDelay.map(lambda x: (airportBC.value[x[0]],x[1])).sortBy(lambda x:-x[1])
lines = topTenAirportsWithDelays.take(10)
topten = "/home/hadoop/"
tenairlines = "/home/hadoop/"
#For collecting the outputs into csv files
with open('topten', "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in lines:
writer.writerows([val])
with open('tenairlines',"w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in delays:
writer.writerows([val])
plot(airlinesdelays)
#Final time taken will be calculated here
timetaken = time.time()-timestarted
print "", timetaken
| apache-2.0 |
montagnero/political-affiliation-prediction | newsreader.py | 2 | 11936 | # -*- coding: utf-8 -*-
from sklearn.decomposition import KernelPCA
from sklearn.metrics.pairwise import pairwise_distances
from scipy.stats.mstats import zscore
import glob
import json
import re
import datetime
import os
import cPickle
import codecs
import itertools
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import double,triu,ones,hstack,arange,reshape,zeros,setdiff1d,array,zeros,eye,argmax,percentile
def get_news(sources=['spiegel','faz','welt','zeit'], folder='model'):
'''
    Collects all news articles from the politics sections of major German newspapers
Articles are transformed to BoW vectors and assigned to a political party
For better visualization, articles' BoW vectors are also clustered into topics
INPUT
folder the model folder containing classifier and BoW transformer
sources a list of strings for each newspaper for which a crawl is implemented
default ['zeit','sz']
'''
import classifier
from bs4 import BeautifulSoup
from api import fetch_url
import urllib2
news = dict([(source,[]) for source in sources])
# the classifier for prediction of political affiliation
clf = classifier.Classifier(folder=folder)
for source in sources:
        if source == 'spiegel':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.spiegel.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("div", { "class" : "teaser" })
urls = ['http://www.spiegel.de'+a.findNext('a')['href'] for a in titles]
        if source == 'faz':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.faz.net/aktuell/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("a", { "class" : "TeaserHeadLink" })
urls = ['http://www.faz.net'+a['href'] for a in titles]
        if source == 'welt':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.welt.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("a", { "class" : "as_teaser-kicker" })
urls = [a['href'] for a in titles]
        if source == 'sz-without-readability':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.sueddeutsche.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("div", { "class" : "teaser" })
urls = [a.findNext('a')['href'] for a in titles]
        if source == 'zeit':
# fetching articles from zeit.de/politik
url = 'http://www.zeit.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("span", { "class" : "supertitle" })
urls = [a.parent['href'] for a in titles if a.parent['href'].find('/2015-')>0]
print "Found %d articles on %s"%(len(urls),url)
# predict party from url for this source
print "Predicting %s"%source
articles = []
for url in urls:
try:
title,text = fetch_url(url)
prediction = clf.predict(text)
prediction['url'] = url
articles.append((title,prediction))
except:
print('Could not get text from %s'%url)
pass
news[source] = dict(articles)
# save results
datestr = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
open(folder+'/news-%s'%(datestr) + '.json', 'wb').write(json.dumps(news,ensure_ascii=False).encode('utf8'))
def all_saved_news(folder='model'):
import glob
from string import digits
# get just the most recent news articles file (assuming date label ordering)
news = json.load(open(glob.glob(folder+'/news*.json')[-1],"r"))
# collect text data from all articles
articles, data = [], []
for source in news.keys():
for title, article in news[source].items():
# remove numbers
for d in digits: article['text'] = article['text'].replace(d,'')
data.append(article['text'])
predictions = [prediction['probability'] for prediction in article['prediction']]
articles.append({
'source':source,
'title':title,
'url':article['url'],
'prediction':article['prediction'],
'predictedLabel':article['prediction'][argmax(predictions)]['party']
})
return articles, data
def pairwise_dists(data, nneighbors=10, folder='model', dist='l2'):
'''
Computes pairwise distances between bag-of-words vectors of articles
INPUT
folder model folder
nneighbors number of closest neighbors to include in distance list
'''
stopwords = codecs.open("stopwords.txt", "r", encoding="utf-8", errors='ignore').readlines()[5:]
stops = map(lambda x:x.lower().strip(),stopwords)
# using now stopwords and filtering out digits
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
print 'Computing %s pairwise distances'%dist
# KPCA transform bow vectors
if dist is 'l2_kpca_zscore':
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 50.0
width = percentile(K.flatten(),perc)
Xc = zscore(KernelPCA(n_components=50,kernel='rbf',gamma=width).fit_transform(X))
K = pairwise_distances(Xc,metric='l2',n_jobs=1)
elif dist is 'l2_kpca':
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 100./len(data)
width = percentile(K.flatten(),perc)
Xc = KernelPCA(n_components=50,kernel='rbf',gamma=width).fit_transform(X)
K = pairwise_distances(Xc,metric='l2',n_jobs=1)
elif dist is 'l2':
K = pairwise_distances(X,metric='l2',n_jobs=1)
elif dist is 'l1':
K = pairwise_distances(X,metric='l1',n_jobs=1)
# collect closest neighbors
distances = []
for urlidx in range(len(data)):
idx = (K[urlidx,:]).argsort()[1:nneighbors+1]
for sidx in idx:
distances.append([urlidx,sidx,(idx==sidx).nonzero()[0][0]])
return distances
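# Hedged sketch (added commentary, not part of the original module): the
# 'l2_kpca' branch above picks an RBF kernel width from a low percentile of
# the plain l2 distances, projects the tf-idf vectors with KernelPCA and
# recomputes l2 distances in that space. Assuming the repository's
# stopwords.txt is present, a plain-l2 call could look like
#
#     dists = pairwise_dists(list_of_article_texts, nneighbors=5, dist='l2')
#
# and returns [article_index, neighbour_index, rank] triples for the five
# closest articles of each text.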
def load_sentiment(negative='SentiWS_v1.8c/SentiWS_v1.8c_Negative.txt',\
positive='SentiWS_v1.8c/SentiWS_v1.8c_Positive.txt'):
words = dict()
for line in open(negative).readlines():
parts = line.strip('\n').split('\t')
words[parts[0].split('|')[0]] = double(parts[1])
if len(parts)>2:
for inflection in parts[2].strip('\n').split(','):
words[inflection] = double(parts[1])
for line in open(positive).readlines():
parts = line.strip('\n').split('\t')
words[parts[0].split('|')[0]] = double(parts[1])
if len(parts)>2:
for inflection in parts[2].strip('\n').split(','):
words[inflection] = double(parts[1])
return words
def get_sentiments(data):
# filtering out some noise words
stops = map(lambda x:x.lower().strip(),open('stopwords.txt').readlines()[6:])
# vectorize non-stopwords
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
# map sentiment vector to bow space
words = load_sentiment()
sentiment_vec = zeros(X.shape[1])
for key in words.keys():
if bow.vocabulary_.has_key(key):
sentiment_vec[bow.vocabulary_[key]] = words[key]
# compute sentiments
return X.dot(sentiment_vec)
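# Hedged mini-example (illustrative numbers only): with a toy vocabulary
# {'gut': 0, 'schlecht': 1} and weights +0.37 for 'gut' and -0.77 for
# 'schlecht', a tf-idf row [0.5, 0.5] gets the score
# 0.5*0.37 + 0.5*(-0.77) = -0.20, which is exactly the X.dot(sentiment_vec)
# product computed above.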
def kpca_cluster(data,nclusters=100,ncomponents=40,topwhat=10,zscored=False):
'''
Computes clustering of bag-of-words vectors of articles
INPUT
folder model folder
nclusters number of clusters
'''
from sklearn.cluster import KMeans
# filtering out some noise words
stops = map(lambda x:x.lower().strip(),open('stopwords.txt').readlines()[6:])
# vectorize non-stopwords
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
# creating bow-index-to-word map
idx2word = dict(zip(bow.vocabulary_.values(),bow.vocabulary_.keys()))
# using now stopwords and filtering out digits
print 'Computing pairwise distances'
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 50.0
width = percentile(K.flatten(),perc)
# KPCA transform bow vectors
Xc = KernelPCA(n_components=ncomponents,kernel='rbf',gamma=width).fit_transform(X)
if zscored:
Xc = zscore(Xc)
# compute clusters
km = KMeans(n_clusters=nclusters).fit(Xc)
Xc = km.predict(Xc)
clusters = []
for icluster in range(nclusters):
nmembers = (Xc==icluster).sum()
if True:#nmembers < len(data) / 5.0 and nmembers > 1: # only group clusters big enough but not too big
members = (Xc==icluster).nonzero()[0]
topwordidx = array(X[members,:].sum(axis=0))[0].argsort()[-topwhat:][::-1]
topwords = ' '.join([idx2word[wi] for wi in topwordidx])
meanDist = triu(pairwise_distances(X[members,:],metric='l2',n_jobs=1)).sum()
meanDist = meanDist / (len(members) + (len(members)**2 - len(members))/2.0)
# print u'Cluster %d'%icluster + u' %d members'%nmembers + u' mean Distance %f'%meanDist + u'\n\t'+topwords
clusters.append({
'name':'Cluster-%d'%icluster,
'description': topwords,
'members': list(members),
'meanL2Distances': meanDist
})
return clusters
def party_cluster(articles):
clusters = []
keyf = lambda a: a[1]['predictedLabel']
for k, group in itertools.groupby(sorted(enumerate(articles), key=keyf), keyf):
clusters.append({
'name': k,
'description': k,
'members': [index_article_tuple[0] for index_article_tuple in group]
})
return clusters
def write_distances_json(folder='model'):
articles, data = all_saved_news(folder)
dists = ['l2_kpca']
distances_json = {
'articles': articles,
'sentiments': json.dumps(get_sentiments(data).tolist()),
'distances': [
{ 'name': dist, 'distances': pairwise_dists(data,dist = dist) } for dist in dists
],
'clusterings': [
{ 'name': 'Parteivorhersage', 'clusters': party_cluster(articles) },
{ 'name': 'Ähnlichkeit', 'clusters': kpca_cluster(data,nclusters=len(articles)/2,ncomponents=40,zscored=False) },
]
}
# save article with party prediction and distances to closest articles
datestr = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
open(folder+'/distances-%s'%(datestr)+'.json', 'wb').write(json.dumps(distances_json))
# also save that latest version for the visualization
open(folder+'/distances.json', 'wb').write(json.dumps(distances_json))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(\
description='Downloads, transforms and clusters news articles')
parser.add_argument('-f','--folder',help='Folder to store text files [./model]',\
default='model')
parser.add_argument('-d','--download',help='If files should be downloaded',\
action='store_true', default=False)
parser.add_argument('-p','--distances',help='If pairwise distances of text should be computed',\
action='store_true', default=False)
args = vars(parser.parse_args())
if not os.path.isdir(args['folder']):
os.mkdir(args['folder'])
if args['download']:
get_news(folder=args['folder'])
if args['distances']:
write_distances_json(folder=args['folder'])
| mit |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <mdroe@stsci.edu>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
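# Hedged demo (added for illustration; not part of the original matplotlib
# module): parse_fontconfig_pattern turns a fontconfig-style string into the
# property dict consumed by font_manager.FontProperties.
if __name__ == '__main__':
    demo_props = parse_fontconfig_pattern('serif-12:bold:italic')
    # expected roughly: {'family': ['serif'], 'size': ['12'],
    # 'weight': ['bold'], 'slant': ['italic']}
    print demo_props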
| agpl-3.0 |
gef756/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
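# Hedged illustration (comment only, not part of the scipy source): the
# interpolating polynomial through (0, 0), (1, 1), (2, 4) is x**2, so
# lagrange([0, 1, 2], [0, 1, 4]) evaluated at 3 gives 9.0 up to floating
# point rounding.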
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0,'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
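    # Worked example of the linear branch above (hedged, added commentary):
    # with x = [0., 1., 2.] and y = [0., 10., 20.], a query at x_new = 0.5
    # falls into the first interval, slope = (10 - 0) / (1 - 0) = 10, and the
    # interpolated value is 10 * (0.5 - 0) + 0 = 5.0.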
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
        Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
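    # Hedged worked example (comment only): for the polynomial
    # 3*x**2 + 2*x + 1 on [0, 1] (c = [[3], [2], [1]], x = [0, 1]),
    # integrate(0, 1) evaluates the antiderivative x**3 + x**2 + x at the
    # bounds and returns 1 + 1 + 1 = 3.0.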
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discont` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
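# Hedged illustration (comment only, not in the scipy source): with
# coefficients c = [[3.], [2.], [1.]] and breakpoints x = [0., 1.], PPoly
# represents 3*(x - 0)**2 + 2*(x - 0) + 1 on [0, 1]; evaluating at 0.5 gives
# 3*0.25 + 2*0.5 + 1 = 2.75, and the derivative polynomial 6*x + 2 gives 5.0
# at the same point.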
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
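    # Illustrative sketch: with c = [1, 2, 4] on [0, 1] the Bernstein form
    # collapses to B(x) = (1 + x)**2, so the derivative is 2 + 2*x.
    #
    # >>> bp = BPoly([[1.0], [2.0], [4.0]], [0, 1])
    # >>> dbp = bp.derivative()
    # >>> np.allclose(dbp([0.0, 0.5, 1.0]), [2.0, 3.0, 4.0])
    # True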
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k + nu representing the
antiderivative of this polynomial.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
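    # Illustrative sketch: for B(x) = (1 + x)**2 on [0, 1] the definite
    # integral is (2**3 - 1**3) / 3 = 7/3.
    #
    # >>> bp = BPoly([[1.0], [2.0], [4.0]], [0, 1])
    # >>> np.allclose(bp.integrate(0, 1), 7.0 / 3.0)
    # True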
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
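    # Illustrative sketch: round-tripping a power-basis polynomial through
    # the Bernstein basis leaves its values unchanged.
    #
    # >>> pp = PPoly([[1.0], [-2.0], [3.0]], [0, 1])  # x**2 - 2*x + 3 on [0, 1]
    # >>> bp = BPoly.from_power_basis(pp)
    # >>> xs = np.linspace(0, 1, 7)
    # >>> np.allclose(bp(xs), pp(xs))
    # True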
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
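    # Illustrative sketch: a cubic on [0, 1] with value 0 and slope 1 at the
    # left end, and value 1 and slope 0 at the right end.
    #
    # >>> c = BPoly._construct_from_derivatives(0, 1, [0, 1], [1, 0])
    # >>> bp = BPoly(c[:, None], [0, 1])
    # >>> np.allclose([bp(0), bp(1)], [0, 1])
    # True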
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
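    # Illustrative sketch: raising the degree changes the representation but
    # not the polynomial itself.
    #
    # >>> c = np.array([1.0, 2.0, 4.0])          # quadratic: (1 + x)**2 on [0, 1]
    # >>> c3 = BPoly._raise_degree(c, 1)         # same polynomial, cubic form
    # >>> xs = np.linspace(0, 1, 5)
    # >>> np.allclose(BPoly(c3[:, None], [0, 1])(xs), BPoly(c[:, None], [0, 1])(xs))
    # True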
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
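    # Illustrative sketch: the interpolation method can be chosen per call,
    # e.g. a nearest-neighbour lookup on a 1-D grid.
    #
    # >>> rgi = RegularGridInterpolator((np.array([0.0, 1.0]),),
    # ...                               np.array([10.0, 20.0]))
    # >>> np.allclose(rgi([[0.4]], method='nearest'), 10.0)
    # True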
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
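# Illustrative usage sketch for interpn: on a bilinear function the default
# 'linear' method is exact (the grid and sample point below are made up).
#
# >>> x = np.linspace(0, 1, 5)
# >>> y = np.linspace(0, 2, 9)
# >>> values = np.add.outer(2 * x, y)      # f(x, y) = 2*x + y
# >>> np.allclose(interpn((x, y), values, [[0.5, 1.0]]), 2.0)
# True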
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
        Constraint conditions passed to the construction routine for the given
        `kind`; ignored for kind='smoothest'. Default is None.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
        Spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
        Order of the derivative to evaluate (default is 0, the spline itself)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
    kind : string, optional
        Spline construction kind; currently only 'smoothest' is implemented.
    conds : optional
        Constraint conditions forwarded to `splmake`; default is None.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
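# Illustrative usage sketch for spline(): a cubic 'smoothest' fit through a
# handful of samples, evaluated on a finer grid (assumes the compiled
# _fitpack helpers used by splmake are available).
#
# >>> xk = np.linspace(0, 2 * np.pi, 10)
# >>> yk = np.sin(xk)
# >>> xnew = np.linspace(0, 2 * np.pi, 50)
# >>> spline(xk, yk, xnew).shape
# (50,)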
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/reshape/test_reshape.py | 1 | 25248 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
import numpy as np
from numpy import nan
import pytest
from pandas.compat import u
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
from pandas.core.sparse.api import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, {}]'.format(
self.effective_dtype(dtype).name,
fill_value
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.DataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
class TestCategoricalReshape(object):
def test_reshaping_multi_index_categorical(self):
# construct a MultiIndexed DataFrame formerly created
# via `tm.makePanel().to_frame()`
cols = ['ItemA', 'ItemB', 'ItemC']
data = {c: tm.makeTimeDataFrame() for c in cols}
df = pd.concat({c: data[c].stack() for c in data}, axis='columns')
df.index.names = ['major', 'minor']
df['str'] = 'foo'
dti = df.index.levels[0]
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(dti))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=dti)
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
codes=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
JFriel/honours_project | networkx/build/lib/networkx/convert_matrix.py | 10 | 33329 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
        A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
        A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
                if col != source and col != target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
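# Illustrative sketch (not part of the library API): demonstrates the dtype
# caveat documented in from_pandas_dataframe -- when every column is numeric
# and ints are mixed with floats, ``DataFrame.values`` upcasts the whole
# block to float, so integer node labels come back as floats.
def _demo_values_dtype_caveat():
    import pandas as pd
    df = pd.DataFrame({'source': [0, 1], 'target': [1, 2],
                       'weight': [1.5, 2.5]})
    # All three columns are numeric, so df.values has a single float dtype
    # and the integer labels 0, 1, 2 are returned as 0.0, 1.0, 2.0.
    return df.values.dtype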
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
        except KeyError:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
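# Illustrative sketch (not part of the library API): the nan-based filling
# explained in the comments above lets a caller separate a real edge whose
# weight is zero from the absence of an edge by choosing a different
# ``nonedge`` value.
def _demo_nonedge_vs_zero_weight():
    import numpy as np
    G = nx.Graph()
    G.add_edge(0, 1, weight=0.0)   # a genuine edge with weight zero
    G.add_node(2)                  # an isolated node with no edges
    A = to_numpy_matrix(G, nodelist=[0, 1, 2], nonedge=np.nan)
    # A[0, 1] is 0.0 (real edge) while A[0, 2] is nan (no edge).
    return A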
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
    except KeyError:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
    # ``_generate_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
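# Illustrative sketch (not part of the library API): for an undirected
# multigraph only the upper triangle of the matrix is consumed, as noted
# above, so a symmetric input does not create duplicated parallel edges.
def _demo_no_duplicate_multigraph_edges():
    import numpy as np
    A = np.matrix([[0, 1], [1, 0]])
    G = from_numpy_matrix(A, create_using=nx.MultiGraph())
    return G.number_of_edges()   # 1, not 2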
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
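# Illustrative sketch (not part of the library API): walking ``indptr`` as
# _csr_gen_triples does above yields one (row, column, value) triple per
# stored entry of a CSR matrix.
def _demo_csr_triples():
    from scipy.sparse import csr_matrix
    A = csr_matrix([[0, 2], [3, 0]])
    return list(_csr_gen_triples(A))   # [(0, 1, 2), (1, 0, 3)]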
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
    # ``_generate_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
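# Illustrative sketch (not part of the library API): a round trip through
# to_scipy_sparse_matrix and from_scipy_sparse_matrix preserves the weighted
# edge set of a simple graph, with nodes relabelled 0..n-1.
def _demo_sparse_roundtrip():
    G = nx.Graph()
    G.add_edge('a', 'b', weight=2.0)
    A = to_scipy_sparse_matrix(G, nodelist=['a', 'b'])
    H = from_scipy_sparse_matrix(A)
    return H[0][1]['weight']   # 2.0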
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
    except ImportError:
raise SkipTest("NumPy not available")
try:
import scipy
    except ImportError:
raise SkipTest("SciPy not available")
try:
import pandas
    except ImportError:
raise SkipTest("Pandas not available")
| gpl-3.0 |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/testing/image_util.py | 11 | 3765 | # This module contains some functionality from the Python Imaging
# Library, that has been ported to use Numpy arrays rather than PIL
# Image objects.
# The Python Imaging Library is
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1995-2009 by Fredrik Lundh
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.cbook import deprecated, warn_deprecated
warn_deprecated('1.4.0', name='matplotlib.testing.image_util',
obj_type='module')
@deprecated('1.4.0')
def autocontrast(image, cutoff=0):
"""
Maximize image contrast, based on histogram. This completely
ignores the alpha channel.
"""
assert image.dtype == np.uint8
output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8)
for i in xrange(0, 3):
plane = image[:,:,i]
output_plane = output_image[:,:,i]
h = np.histogram(plane, bins=256)[0]
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in xrange(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
            # remove cutoff% pixels from the high end
cut = n * cutoff / 100
for hi in xrange(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in xrange(256):
if h[lo]:
break
for hi in xrange(255, -1, -1):
if h[hi]:
break
if hi <= lo:
output_plane[:,:] = plane
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
lut = np.arange(256, dtype=np.float)
lut *= scale
lut += offset
lut = lut.clip(0, 255)
lut = lut.astype(np.uint8)
output_plane[:,:] = lut[plane]
return output_image
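# Illustrative sketch (not part of this module): basic usage of the
# deprecated helper above on a small synthetic RGB image; the result keeps
# the spatial shape, has three channels, and stays uint8.
def _demo_autocontrast():
    image = np.arange(48, dtype=np.uint8).reshape(4, 4, 3)
    out = autocontrast(image)
    return out.shape, out.dtype   # (4, 4, 3), dtype('uint8')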
| gpl-2.0 |
pnedunuri/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
jakobworldpeace/scikit-learn | sklearn/ensemble/forest.py | 8 | 67993 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
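# Illustrative sketch (not part of the estimator code): for a fixed seed the
# in-bag indices drawn by _generate_sample_indices and the out-of-bag indices
# returned above partition range(n_samples) with no overlap.
def _demo_oob_partition(random_state=0, n_samples=10):
    in_bag = set(_generate_sample_indices(random_state, n_samples))
    out_of_bag = set(_generate_unsampled_indices(random_state, n_samples))
    assert in_bag.isdisjoint(out_of_bag)
    assert in_bag | out_of_bag == set(range(n_samples))
    return sorted(out_of_bag)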
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
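# Illustrative sketch (not part of the estimator code): the bincount-based
# reweighting used above is equivalent to fitting each tree on the bootstrap
# sample itself -- every drawn index contributes weight 1 per draw, and
# indices that are never drawn get weight 0 (they are the out-of-bag rows).
def _demo_bootstrap_weights(random_state=0, n_samples=6):
    indices = _generate_sample_indices(random_state, n_samples)
    weights = bincount(indices, minlength=n_samples).astype(np.float64)
    return indices, weights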
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
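# Illustrative sketch (uses the public scikit-learn API; the dataset below is
# synthetic): with ``warm_start=True`` a second call to ``fit`` grows the
# existing ensemble instead of rebuilding it, as handled in BaseForest.fit
# above.
def _demo_warm_start():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=50, random_state=0)
    clf = RandomForestClassifier(n_estimators=5, warm_start=True,
                                 random_state=0)
    clf.fit(X, y)                       # grows 5 trees
    clf.set_params(n_estimators=10)
    clf.fit(X, y)                       # appends 5 more trees
    return len(clf.estimators_)         # 10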
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
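# Illustrative sketch (uses the public scikit-learn API; the dataset below is
# synthetic): with bootstrapping enabled the out-of-bag machinery above
# exposes a generalization estimate through ``oob_score_`` and per-sample
# predictions through ``oob_prediction_``.
def _demo_oob_score():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    reg = RandomForestRegressor(n_estimators=30, bootstrap=True,
                                oob_score=True, random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.oob_prediction_.shape   # R^2 estimate, (100,)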
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
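# A minimal usage sketch for the classifier defined above, assuming the usual
# scikit-learn estimator API and that the sklearn.datasets helpers are available:
#
#   >>> from sklearn.datasets import make_classification
#   >>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
#   >>> clf = RandomForestClassifier(n_estimators=10, random_state=0)
#   >>> clf.fit(X, y)
#   >>> clf.predict(X[:5])
#   >>> clf.predict_proba(X[:5])   # class probabilities averaged over trees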
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
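# A corresponding sketch for the regressor defined above, again assuming the
# standard estimator API and the sklearn.datasets helpers:
#
#   >>> from sklearn.datasets import make_regression
#   >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
#   >>> reg = RandomForestRegressor(n_estimators=10, random_state=0)
#   >>> reg.fit(X, y).predict(X[:2])
#   >>> reg.feature_importances_   # array of shape [n_features]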
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
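# The extra-trees estimators defined above share the same fit/predict surface
# as the random forests; a minimal sketch, assuming data like that used in the
# earlier examples:
#
#   >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
#   >>> clf.fit(X, y)              # X, y from a classification problem
#   >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
#   >>> reg.fit(X, y)              # X, y from a regression problem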
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
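# A minimal sketch of the embedding contract described above, assuming only
# numpy and the class defined here: each sample is hashed to a sparse one-hot
# code over the leaves of the forest.
#
#   >>> import numpy as np
#   >>> X = np.random.RandomState(0).rand(20, 3)
#   >>> hasher = RandomTreesEmbedding(n_estimators=5, random_state=0)
#   >>> X_sparse = hasher.fit_transform(X)   # sparse CSR matrix, shape (20, n_out)
#   >>> X_sparse.toarray().sum(axis=1)       # one active leaf per tree per sample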
| bsd-3-clause |
ominux/scikit-learn | examples/linear_model/plot_sgd_iris.py | 4 | 2171 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
pl.set_cmap(pl.cm.Paired)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
cs = pl.contourf(xx, yy, Z)
pl.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes, colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i])
pl.title("Decision surface of multi-class SGD")
pl.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
coef = clf.coef_
intercept = clf.intercept_
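# For class c, the one-vs-all decision boundary is the line where
# coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] == 0, i.e.
# x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1];
# line() below evaluates this at the two x-limits so the boundary can be drawn.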
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
pl.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes, colors):
plot_hyperplane(i, color)
pl.legend()
pl.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
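# A minimal usage sketch of the procedural interface catalogued in the
# docstring above (an illustration only; exact behaviour depends on the local
# matplotlib backend configuration):
#
#   >>> from pylab import plot, xlabel, ylabel, title, show
#   >>> plot([1, 2, 3], [1, 4, 9], 'o-')
#   >>> xlabel('x'); ylabel('x squared'); title('a quick pylab figure')
#   >>> show()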
| gpl-3.0 |
kcompher/thunder | thunder/extraction/source.py | 6 | 31847 | from numpy import asarray, mean, sqrt, ndarray, amin, amax, concatenate, sum, zeros, maximum, \
argmin, newaxis, ones, delete, NaN, inf, isnan, clip, logical_or, unique, where, all
from thunder.utils.serializable import Serializable
from thunder.utils.common import checkParams, aslist
from thunder.rdds.images import Images
from thunder.rdds.series import Series
class Source(Serializable, object):
"""
A single source, represented as a list of coordinates and other optional specifications.
A source also has a set of lazily computed attributes useful for representing and comparing
its geometry, such as center, bounding box, and bounding polygon. These properties
will be computed lazily and made available as attributes when requested.
Parameters
----------
coordinates : array-like
List of 2D or 3D coordinates, can be a list of lists or array of shape (n,2) or (n,3)
values : list or array-like
Value (or weight) associated with each coordinate
id : int or string
Arbitrary specification per source, typically an index or string label
Attributes
----------
center : list or array-like
The coordinates of the center of the source
polygon : list or array-like
The coordinates of a polygon bounding the region (a convex hull)
bbox : list or array-like
Boundaries of the source (with the lowest values for all axes followed by the highest values)
area : scalar
The area of the region
"""
from zope.cachedescriptors import property
def __init__(self, coordinates, values=None, id=None):
self.coordinates = asarray(coordinates)
if self.coordinates.ndim == 1 and len(self.coordinates) > 0:
self.coordinates = asarray([self.coordinates])
if values is not None:
self.values = asarray(values)
if self.values.ndim == 0:
self.values = asarray([self.values])
if not (len(self.coordinates) == len(self.values)):
raise ValueError("Lengths of coordinates %g and values %g do not match"
% (len(self.coordinates), len(self.values)))
if id is not None:
self.id = id
@property.Lazy
def center(self):
"""
Find the region center using a mean.
"""
# TODO Add option to use weights
return mean(self.coordinates, axis=0)
@property.Lazy
def polygon(self):
"""
Find the bounding polygon as a convex hull
"""
# TODO Add option for simplification
from scipy.spatial import ConvexHull
if len(self.coordinates) >= 4:
inds = ConvexHull(self.coordinates).vertices
return self.coordinates[inds]
else:
return self.coordinates
@property.Lazy
def bbox(self):
"""
Find the bounding box.
"""
mn = amin(self.coordinates, axis=0)
mx = amax(self.coordinates, axis=0)
return concatenate((mn, mx))
@property.Lazy
def area(self):
"""
Find the region area.
"""
return len(self.coordinates)
def restore(self, skip=None):
"""
Remove all lazy properties, will force recomputation
"""
if skip is None:
skip = []
elif isinstance(skip, str):
skip = [skip]
for prop in LAZY_ATTRIBUTES:
if prop in self.__dict__.keys() and prop not in skip:
del self.__dict__[prop]
return self
def distance(self, other, method='euclidean'):
"""
Distance between the center of this source and another.
Parameters
----------
other : Source, or array-like
Either another source, or the center coordinates of another source
method : str
Specify a distance measure to used for spatial distance between source
centers. Current options include Euclidean distance ('euclidean') and
L1-norm ('l1').
"""
from numpy.linalg import norm
checkParams(method, ['euclidean', 'l1'])
if method == 'l1':
order = 1
else:
order = 2
if isinstance(other, Source):
return norm(self.center - other.center, ord=order)
elif isinstance(other, list) or isinstance(other, ndarray):
return norm(self.center - asarray(other), ord=order)
def overlap(self, other, method='fraction'):
"""
Compute the overlap between this source and other.
Options are a symmetric measure of overlap based on the fraction
of intersecting pixels relative to the union ('fraction'), an asymmetric
measure of overlap that expresses detected intersecting pixels
(relative to this source) using precision and recall rates ('rates'), or
a correlation coefficient of the weights within the intersection
(not defined for binary weights) ('correlation')
Parameters
----------
other : Source
The source to compute overlap with.
method : str
Which estimate of overlap to compute, options are
'fraction' (symmetric) 'rates' (asymmetric) or 'correlation'
"""
checkParams(method, ['fraction', 'rates', 'correlation'])
coordsSelf = aslist(self.coordinates)
coordsOther = aslist(other.coordinates)
intersection = [a for a in coordsSelf if a in coordsOther]
nhit = float(len(intersection))
ntotal = float(len(set([tuple(x) for x in coordsSelf] + [tuple(x) for x in coordsOther])))
if method == 'rates':
recall = nhit / len(coordsSelf)
precision = nhit / len(coordsOther)
return recall, precision
if method == 'fraction':
return nhit / float(ntotal)
if method == 'correlation':
from scipy.stats import spearmanr
if not (hasattr(self, 'values') and hasattr(other, 'values')):
raise ValueError('Sources must have values to compute correlation')
else:
valuesSelf = aslist(self.values)
valuesOther = aslist(other.values)
if len(intersection) > 0:
left = [v for v, c in zip(valuesSelf, coordsSelf) if c in coordsOther]
right = [v for v, c in zip(valuesOther, coordsOther) if c in coordsSelf]
rho, _ = spearmanr(left, right)
else:
rho = 0.0
return rho
def merge(self, other):
"""
Combine this source with other
"""
self.coordinates = concatenate((self.coordinates, other.coordinates))
if hasattr(self, 'values'):
self.values = concatenate((self.values, other.values))
return self
def tolist(self):
"""
Convert array-like attributes to list
"""
import copy
new = copy.copy(self)
for prop in ["coordinates", "values", "center", "bbox", "polygon"]:
if prop in self.__dict__.keys():
val = new.__getattribute__(prop)
if val is not None and not isinstance(val, list):
setattr(new, prop, val.tolist())
return new
def toarray(self):
"""
Convert array-like attributes to ndarray
"""
import copy
new = copy.copy(self)
for prop in ["coordinates", "values", "center", "bbox", "polygon"]:
if prop in self.__dict__.keys():
val = new.__getattribute__(prop)
if val is not None and not isinstance(val, ndarray):
setattr(new, prop, asarray(val))
return new
def crop(self, minBound, maxBound):
"""
Crop a source by removing coordinates outside bounds.
Follows normal slice indexing conventions.
Parameters
----------
minBound : tuple
Minimum or starting bounds for each axis
maxBound : tuple
Maximum or ending bounds for each axis
"""
coords = self.coordinates
newid = self.id if hasattr(self, 'id') else None
if hasattr(self, 'values') and self.values is not None:
values = self.values
inside = [(c, v) for c, v in zip(coords, values) if all(c >= minBound) and all(c < maxBound)]
newcoords, newvalues = zip(*inside)
return Source(coordinates=newcoords, values=newvalues, id=newid)
else:
newcoords = [c for c in coords if all(c >= minBound) and all(c < maxBound)]
return Source(coordinates=newcoords, id=newid)
def dilate(self, size):
"""
Dilate a source using morphological operators.
Parameters
----------
size : int
Size of dilation in pixels
"""
if size == 0:
newcoords = self.coordinates
else:
size = (size * 2) + 1
if hasattr(self, 'values') and self.values is not None:
raise AttributeError('Cannot dilate sources with values')
from skimage.morphology import binary_dilation
coords = self.coordinates
extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1 + size * 2
m = zeros(extent)
coords = (coords - self.bbox[0:len(self.center)] + size)
m[coords.T.tolist()] = 1
m = binary_dilation(m, ones((size, size)))
newcoords = asarray(where(m)).T + self.bbox[0:len(self.center)] - size
newcoords = [c for c in newcoords if all(c >= 0)]
newid = self.id if hasattr(self, 'id') else None
return Source(coordinates=newcoords, id=newid)
def exclude(self, other):
"""
Remove coordinates derived from another Source or an array.
If other is an array, will remove coordinates of all
non-zero elements from this source. If other is a source,
will remove any matching coordinates.
Parameters
----------
other : ndarray or Source
Source to remove
"""
if isinstance(other, ndarray):
coordsOther = asarray(where(other)).T
else:
coordsOther = aslist(other.coordinates)
coordsSelf = aslist(self.coordinates)
newid = self.id if hasattr(self, 'id') else None
if hasattr(self, 'values') and self.values is not None:
valuesSelf = self.values
complement = [(c, v) for c, v in zip(coordsSelf, valuesSelf) if c not in coordsOther]
newcoords, newvalues = zip(*complement)
return Source(coordinates=newcoords, values=newvalues, id=newid)
else:
complement = [a for a in coordsSelf if a not in coordsOther]
return Source(coordinates=complement, id=newid)
def outline(self, inner, outer):
"""
Compute source outline by differencing two dilations
Parameters
----------
inner : int
Size of inner outline boundary (in pixels)
outer : int
Size of outer outline boundary (in pixels)
"""
return self.dilate(outer).exclude(self.dilate(inner))
def transform(self, data, collect=True):
"""
Extract series from data using a list of sources.
Currently only supports averaging over coordinates.
Params
------
data : Images or Series object
The data from which to extract
collect : boolean, optional, default = True
Whether to collect to local array or keep as a Series
"""
if not (isinstance(data, Images) or isinstance(data, Series)):
raise Exception("Input must either be Images or Series (or a subclass)")
# TODO add support for weighting
if isinstance(data, Images):
output = data.meanByRegions([self.coordinates]).toSeries()
else:
output = data.meanOfRegion(self.coordinates)
if collect:
return output.collectValuesAsArray()
else:
return output
def mask(self, dims=None, binary=True, outline=False, color=None):
"""
Construct a mask from a source, either locally or within a larger image.
Parameters
----------
dims : list or tuple, optional, default = None
Dimensions of large image in which to draw mask. If none, will restrict
to the bounding box of the region.
binary : boolean, optional, default = True
Whether to incorporate values or only show a binary mask
outline : boolean, optional, default = False
Whether to only show outlines (derived using binary dilation)
color : str or array-like
RGB triplet (from 0 to 1) or named color (e.g. 'red', 'blue')
"""
from thunder import Colorize
coords = self.coordinates
if dims is None:
extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1
m = zeros(extent)
coords = (coords - self.bbox[0:len(self.center)])
else:
m = zeros(dims)
if hasattr(self, 'values') and self.values is not None and binary is False:
m[coords.T.tolist()] = self.values
else:
m[coords.T.tolist()] = 1
if outline:
from skimage.morphology import binary_dilation
m = binary_dilation(m, ones((3, 3))) - m
if color is not None:
m = Colorize(cmap='indexed', colors=[color]).transform([m])
return m
def inbounds(self, minBound, maxBound):
"""
Check what fraction of coordinates are inside given bounds
Parameters
----------
minBound : list or tuple
Minimum bounds
maxBound : list or tuple
Maximum bounds
"""
minCheck = sum(self.coordinates < minBound, axis=1) > 0
maxCheck = sum(self.coordinates > maxBound, axis=1) > 0
fraction = 1 - sum(logical_or(minCheck, maxCheck)) / float(len(self.coordinates))
return fraction
@staticmethod
def fromMask(mask, id=None):
"""
Generate a source from a mask.
Assumes that the mask is an image where all non-zero
elements are part of the source. If all non-zero
elements are 1, then values will be ignored
as the source is assumed to be binary.
Parameters
----------
mask : array-like
An array (typically 2D or 3D) containing the image mask
id : int or string
Arbitrary identifier for the source, typically an int or string
"""
mask = asarray(mask)
u = unique(mask)
if len(u) == 2 and u[0] == 0 and u[1] == 1:
inds = where(mask)
return Source(coordinates=asarray(zip(*inds)), id=id)
else:
inds = where(mask)
values = mask[inds]
coords = asarray(zip(*inds))
return Source(coordinates=coords, values=values, id=id)
@staticmethod
def fromCoordinates(coordinates, values=None, id=None):
"""
Generate a source from a list of coordinates and values.
Parameters
----------
coordinates : array-like
List coordinates as a list of lists or array of shape (n,2) or (n,3)
values : list or array-like
Value (or weight) associated with each coordinate
id : int or string
Arbitrary specification per source, typically an index or string label
"""
return Source(coordinates, values, id)
def __repr__(self):
s = self.__class__.__name__
for opt in ["id", "center", "bbox"]:
if hasattr(self, opt):
o = self.__getattribute__(opt)
os = o.tolist() if isinstance(o, ndarray) else o
s += '\n%s: %s' % (opt, repr(os))
return s
class SourceModel(Serializable, object):
"""
A source model as a collection of extracted sources.
Parameters
----------
sources : list or Sources or a single Source
The identified sources
See also
--------
Source
"""
def __init__(self, sources):
if isinstance(sources, Source):
self.sources = [sources]
elif isinstance(sources, list) and isinstance(sources[0], Source):
self.sources = sources
elif isinstance(sources, list):
self.sources = []
for ss in sources:
self.sources.append(Source(ss))
else:
raise Exception("Input type not recognized, must be Source, list of Sources, "
"or list of coordinates, got %s" % type(sources))
def __getitem__(self, entry):
if not isinstance(entry, int):
raise IndexError("Selection not recognized, must be Int, got %s" % type(entry))
return self.sources[entry]
def combiner(self, prop, tolist=True):
combined = []
for s in self.sources:
p = getattr(s, prop)
if tolist:
p = p.tolist()
combined.append(p)
return combined
@property
def coordinates(self):
"""
List of coordinates combined across sources
"""
return self.combiner('coordinates')
@property
def values(self):
"""
List of values combined across sources
"""
return self.combiner('values')
@property
def centers(self):
"""
Array of centers combined across sources
"""
return asarray(self.combiner('center'))
@property
def polygons(self):
"""
List of polygons combined across sources
"""
return self.combiner('polygon')
@property
def areas(self):
"""
List of areas combined across sources
"""
return self.combiner('area', tolist=False)
@property
def count(self):
"""
Number of sources
"""
return len(self.sources)
def masks(self, dims=None, binary=True, outline=False, base=None, color=None, inds=None):
"""
Composite masks combined across sources as an image.
Parameters
----------
dims : list or tuple, optional, default = None
Dimensions of image in which to create masks, must either provide
these or provide a base image
binary : boolean, optional, default = True
Whether to incorporate values or only show a binary mask
outline : boolean, optional, default = False
Whether to only show outlines (derived using binary dilation)
base : SourceModel or array-like, optional, default = None
Base background image on which to put masks,
or another set of sources (usually for comparisons).
color : str, optional, default = None
Color to assign regions, will assign randomly if 'random'
inds : array-like, optional, default = None
List of indices if only showing a subset
"""
from thunder import Colorize
from matplotlib.cm import get_cmap
if inds is None:
inds = range(0, self.count)
if dims is None and base is None:
raise Exception("Must provide image dimensions for composite masks "
"or provide a base image.")
if base is not None and isinstance(base, SourceModel):
outline = True
if dims is None and base is not None:
dims = asarray(base).shape
if isinstance(base, SourceModel):
base = base.masks(dims, color='silver')
elif isinstance(base, ndarray):
base = Colorize(cmap='indexed', colors=['white']).transform([base])
if base is not None and color is None:
color = 'deeppink'
if color == 'random':
combined = zeros(list(dims) + [3])
ncolors = min(self.count, 20)
colors = get_cmap('rainbow', ncolors)(range(0, ncolors, 1))[:, 0:3]
for i in inds:
combined = maximum(self.sources[i].mask(dims, binary, outline, colors[i % len(colors)]), combined)
else:
combined = zeros(dims)
for i in inds:
combined = maximum(self.sources[i].mask(dims, binary, outline), combined)
if color is not None and color != 'random':
combined = Colorize(cmap='indexed', colors=[color]).transform([combined])
if base is not None:
combined = maximum(base, combined)
return combined
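# Illustrative sketch (hypothetical sources and dimensions): composite masks can be
# drawn on a blank canvas or over a base image; 'background' below is assumed to be
# an array whose shape matches the mask dimensions.
#
#   model = SourceModel([Source(asarray([[5, 5], [5, 6]])), Source(asarray([[20, 20], [21, 20]]))])
#   img = model.masks(dims=(32, 32), color='random')   # one random color per source
#   over = model.masks(base=background)                # masks overlaid on the base image (default color 'deeppink')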
def match(self, other, unique=False, minDistance=inf):
"""
For each source in self, find the index of the closest source in other.
Uses euclidean distances between centers to determine distances.
Can select nearest matches with or without enforcing uniqueness;
if unique is False, will return the closest source in other for
each source in self, possibly repeating sources multiple times
if unique is True, will only allow each source in other to be matched
with a single source in self, as determined by a greedy selection procedure.
The minDistance parameter can be used to prevent far-away sources from being
chosen during greedy selection.
Parameters
----------
other : SourceModel
The source model to match sources to
unique : boolean, optional, default = False
Whether to only return unique matches
minDistance : scalar, optional, default = inf
Minimum distance to use when selecting matches
"""
from scipy.spatial.distance import cdist
targets = other.centers
targetInds = range(0, len(targets))
matches = []
for s in self.sources:
update = 1
# skip if no targets left, otherwise update
if len(targets) == 0:
update = 0
else:
dists = cdist(targets, s.center[newaxis])
if dists.min() < minDistance:
ind = argmin(dists)
else:
update = 0
# apply updates, otherwise add a nan
if update == 1:
matches.append(targetInds[ind])
if unique is True:
targets = delete(targets, ind, axis=0)
targetInds = delete(targetInds, ind)
else:
matches.append(NaN)
return matches
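# Illustrative sketch (toy models): without uniqueness the same target may be chosen
# repeatedly; with unique=True a greedy pass assigns each target at most once, and
# minDistance rejects far-away matches, which then appear as NaN.
#
#   a = SourceModel([Source(asarray([[0, 0], [1, 0]])), Source(asarray([[10, 10], [11, 10]]))])
#   b = SourceModel([Source(asarray([[0, 1], [1, 1]])), Source(asarray([[50, 50], [51, 50]]))])
#   a.match(b)                                 # nearest target per source, repeats allowed
#   a.match(b, unique=True, minDistance=5)     # greedy unique matching with a distance cutoff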
def distance(self, other, minDistance=inf):
"""
Compute the distance between each source in self and other.
First estimates a matching source from other for each source
in self, then computes the distance between the two sources.
The matches are unique, using a greedy procedure,
and minDistance can be used to prevent outliers during matching.
Parameters
----------
other : SourceModel
The sources to compute distances to
minDistance : scalar, optional, default = inf
Minimum distance to use when matching indices
"""
inds = self.match(other, unique=True, minDistance=minDistance)
d = []
for jj, ii in enumerate(inds):
if ii is not NaN:
d.append(self[jj].distance(other[ii]))
else:
d.append(NaN)
return asarray(d)
def overlap(self, other, method='fraction', minDistance=inf):
"""
Estimate overlap between sources in self and other.
Will compute the similarity of sources in self that are found
in other, based on either source pixel overlap or correlation.
Parameters
----------
other : SourceModel
The sources to compare to
method : str, optional, default = 'fraction'
Method to use when computing overlap between sources
('fraction', 'rates', or 'correlation')
minDistance : scalar, optional, default = inf
Minimum distance to use when matching indices
"""
inds = self.match(other, unique=True, minDistance=minDistance)
d = []
for jj, ii in enumerate(inds):
if ii is not NaN:
d.append(self[jj].overlap(other[ii], method=method))
else:
if method == 'rates':
d.append((NaN, NaN))
else:
d.append(NaN)
return asarray(d)
def similarity(self, other, metric='distance', thresh=5, minDistance=inf):
"""
Estimate similarity to another set of sources using recall and precision.
Will compute the number of sources in self that are also
in other, based on a given distance metric and a threshold.
The recall rate is the number of matches divided by the number in self,
and the precision rate is the number of matches divided by the number in other.
Typically self is ground truth and other is an estimate.
The F score is defined as 2 * (recall * precision) / (recall + precision)
Before computing metrics, all sources in self are matched to other,
and a minimum distance can be set to control matching.
Parameters
----------
other : SourceModel
The sources to compare to.
metric : str, optional, default = 'distance'
Metric to use when computing distances,
options include 'distance' and 'overlap'
thresh : scalar, optional, default = 5
Threshold for counting a source as found: distance below thresh for 'distance', overlap above thresh for 'overlap'.
minDistance : scalar, optional, default = inf
Minimum distance to use when matching indices.
"""
checkParams(metric, ['distance', 'overlap'])
if metric == 'distance':
# when evaluating distances,
# minimum distance should be the threshold
if minDistance == inf:
minDistance = thresh
vals = self.distance(other, minDistance=minDistance)
vals[isnan(vals)] = inf
compare = lambda x: x < thresh
elif metric == 'overlap':
vals = self.overlap(other, method='fraction', minDistance=minDistance)
vals[isnan(vals)] = 0
compare = lambda x: x > thresh
else:
raise Exception("Metric not recognized")
recall = sum(map(compare, vals)) / float(self.count)
precision = sum(map(compare, vals)) / float(other.count)
score = 2 * (recall * precision) / (recall + precision)
return recall, precision, score
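# Illustrative sketch: with metric='distance' a source in self counts as found when its
# match in other lies within thresh; recall, precision, and the F score come back as a
# tuple. 'truth' and 'estimate' below are assumed to be existing SourceModels.
#
#   recall, precision, score = truth.similarity(estimate, metric='distance', thresh=5)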
def transform(self, data, collect=True):
"""
Extract series from data using a list of sources.
Currently only supports simple averaging over coordinates.
Parameters
----------
data : Images or Series object
The data from which to extract signals
collect : boolean, optional, default = True
Whether to collect to local array or keep as a Series
"""
if not (isinstance(data, Images) or isinstance(data, Series)):
raise Exception("Input must either be Images or Series (or a subclass)")
# TODO add support for weighting
if isinstance(data, Images):
output = data.meanByRegions(self.coordinates).toSeries()
else:
output = data.meanByRegions(self.coordinates)
if collect:
return output.collectValuesAsArray()
else:
return output
def clean(self, cleaners=None):
"""
Apply one or more cleaners to sources, returning filtered sources
Parameters
----------
cleaners : Cleaner or list of Cleaners, optional, default = None
Which cleaners to apply, if None, will apply BasicCleaner with defaults
"""
from thunder.extraction.cleaners import Cleaner, BasicCleaner
from copy import copy
if isinstance(cleaners, list):
for c in cleaners:
if not isinstance(c, Cleaner):
raise Exception("List must only contain Cleaners")
elif isinstance(cleaners, Cleaner):
cleaners = [cleaners]
elif cleaners is None:
cleaners = [BasicCleaner()]
else:
raise Exception("Must provide Cleaner or list of Cleaners, got %s" % type(cleaners))
newmodel = copy(self)
for c in cleaners:
newmodel = c.clean(newmodel)
return newmodel
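# Illustrative sketch ('model' is assumed to be an existing SourceModel): apply the
# default BasicCleaner, or pass an explicit list of cleaners.
#
#   cleaned = model.clean()                    # BasicCleaner with default settings
#   from thunder.extraction.cleaners import BasicCleaner
#   cleaned = model.clean([BasicCleaner()])    # equivalent explicit form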
def dilate(self, size):
"""
Dilate all sources using morphological operators
Parameters
----------
size : int
Size of dilation in pixels
"""
return SourceModel([s.dilate(size) for s in self.sources])
def outline(self, inner, outer):
"""
Outline all sources
Parameters
----------
inner : int
Size of inner outline boundary (in pixels)
outer : int
Size of outer outline boundary (in pixels)
"""
return SourceModel([s.outline(inner, outer) for s in self.sources])
def crop(self, minBound, maxBound):
"""
Crop all sources by removing coordinates outside of bounds
Parameters
----------
minBound : tuple
Minimum or starting bounds for each axis
maxBound : tuple
Maximum or ending bounds for each axis
"""
return SourceModel([s.crop(minBound, maxBound) for s in self.sources])
def save(self, f, include=None, overwrite=False, **kwargs):
"""
Custom save to file with simplified, human-readable output, and selection of lazy attributes.
"""
import copy
output = copy.deepcopy(self)
if isinstance(include, str):
include = [include]
if include is not None:
for prop in include:
map(lambda s: getattr(s, prop), output.sources)
output.sources = map(lambda s: s.restore(include).tolist(), output.sources)
simplify = lambda d: d['sources']['py/homogeneousList']['data']
super(SourceModel, output).save(f, simplify=simplify, overwrite=overwrite, **kwargs)
@classmethod
def load(cls, f, **kwargs):
"""
Custom load from file to handle simplified, human-readable output
"""
unsimplify = lambda d: {'sources': {
'py/homogeneousList': {'data': d, 'module': 'thunder.extraction.source', 'type': 'Source'}}}
output = super(SourceModel, cls).load(f, unsimplify=unsimplify)
output.sources = map(lambda s: s.toarray(), output.sources)
return output
@classmethod
def deserialize(cls, d, **kwargs):
"""
Custom load from JSON to handle simplified, human-readable output
"""
unsimplify = lambda d: {'sources': {
'py/homogeneousList': {'data': d, 'module': 'thunder.extraction.source', 'type': 'Source'}}}
output = super(SourceModel, cls).deserialize(d, unsimplify=unsimplify)
output.sources = map(lambda s: s.toarray(), output.sources)
return output
def __repr__(self):
s = self.__class__.__name__
s += '\n%g sources' % (len(self.sources))
return s
LAZY_ATTRIBUTES = ["center", "polygon", "bbox", "area"]
| apache-2.0 |
Mako-kun/mangaki | mangaki/mangaki/utils/svd.py | 2 | 5410 | from django.contrib.auth.models import User
from mangaki.models import Rating, Work, Recommendation
from mangaki.utils.chrono import Chrono
from mangaki.utils.values import rating_values
from scipy.sparse import lil_matrix
from sklearn.utils.extmath import randomized_svd
import numpy as np
from django.db import connection
import pickle
import json
import math
NB_COMPONENTS = 10
TOP = 10
class MangakiSVD(object):
M = None
U = None
sigma = None
VT = None
chrono = None
inv_work = None
inv_user = None
work_titles = None
def __init__(self):
self.chrono = Chrono(True)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
def load(self, filename):
with open(filename, 'rb') as f:
backup = pickle.load(f)
self.M = backup.M
self.U = backup.U
self.sigma = backup.sigma
self.VT = backup.VT
self.inv_work = backup.inv_work
self.inv_user = backup.inv_user
self.work_titles = backup.work_titles
def fit(self, X, y):
self.work_titles = {}
for work in Work.objects.values('id', 'title'):
self.work_titles[work['id']] = work['title']
work_ids = list(Rating.objects.values_list('work_id', flat=True).distinct())
nb_works = len(work_ids)
self.inv_work = {work_ids[i]: i for i in range(nb_works)}
user_ids = list(User.objects.values_list('id', flat=True))
nb_users = len(user_ids)
self.inv_user = {user_ids[i]: i for i in range(nb_users)}
self.chrono.save('get_work_ids')
# print("Computing M: (%i × %i)" % (nb_users, nb_works))
self.M = lil_matrix((nb_users, nb_works))
"""ratings_of = {}
for (user_id, work_id), rating in zip(X, y):
ratings_of.setdefault(user_id, []).append(rating)"""
for (user_id, work_id), rating in zip(X, y):
self.M[self.inv_user[user_id], self.inv_work[work_id]] = rating #- np.mean(ratings_of[user_id])
# np.save('backupM', self.M)
self.chrono.save('fill matrix')
# Ranking computation
self.U, self.sigma, self.VT = randomized_svd(self.M, NB_COMPONENTS, n_iter=3, random_state=42)
# print('Shapes', self.U.shape, self.sigma.shape, self.VT.shape)
self.save('backup.pickle')
self.chrono.save('factor matrix')
def predict(self, X):
y = []
for user_id, work_id in X:
i = self.inv_user[user_id]
j = self.inv_work[work_id]
y.append(self.U[i].dot(np.diag(self.sigma)).dot(self.VT.transpose()[j]))
return np.array(y)
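# Illustrative sketch (toy matrix): predict() reads off one cell of the low-rank
# reconstruction U * diag(sigma) * V^T produced by randomized_svd, e.g.
#
#   from scipy.sparse import lil_matrix
#   from sklearn.utils.extmath import randomized_svd
#   import numpy as np
#   R = lil_matrix((3, 4))
#   R[0, 1] = 2; R[1, 2] = 4; R[2, 3] = 1
#   U, sigma, VT = randomized_svd(R, 2, n_iter=3, random_state=42)
#   approx = U[0].dot(np.diag(sigma)).dot(VT.T[1])   # approximates R[0, 1]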
def get_reco(self, username, sending=False):
target_user = User.objects.get(username=username)
the_user_id = target_user.id
svd_user = User.objects.get(username='svd')
work_ids = {self.inv_work[work_id]: work_id for work_id in self.inv_work}
nb_works = len(work_ids)
seen_works = set(Rating.objects.filter(user__id=the_user_id).exclude(choice='willsee').values_list('work_id', flat=True))
the_i = self.inv_user[the_user_id]
self.chrono.save('get_seen_works')
print('my vector (length %d)' % len(self.U[the_i]), self.U[the_i])
print(self.sigma)
for i, line in enumerate(self.VT):
print('=> Row %d' % (i + 1), '(my rating: %f)' % self.U[the_i][i])
sorted_line = sorted((line[j], self.work_titles[work_ids[j]]) for j in range(nb_works))[::-1]
top10 = sorted_line[:10]
bottom10 = sorted_line[-10:]
for anime in top10:
print(anime)
for anime in bottom10:
print(anime)
"""if i == 0 or i == 1: # First two vectors explaining variance
with open('vector%d.json' % (i + 1), 'w') as f:
vi = X.dot(line).tolist()
x_norm = [np.dot(X.data[k], X.data[k]) / (nb_works + 1) for k in range(nb_users + 1)]
f.write(json.dumps({'v': [v / math.sqrt(x_norm[k]) if x_norm[k] != 0 else float('inf') for k, v in enumerate(vi)]}))"""
# print(VT.dot(VT.transpose()))
# return
the_ratings = self.predict((the_user_id, work_ids[j]) for j in range(nb_works))
ranking = sorted(zip(the_ratings, [(work_ids[j], self.work_titles[work_ids[j]]) for j in range(nb_works)]), reverse=True)
# Summarize the results of the ranking for the_user_id:
# “=> rank, title, score”
c = 0
for i, (rating, (work_id, title)) in enumerate(ranking, start=1):
if work_id not in seen_works:
print('=>', i, title, rating, self.predict([(the_user_id, work_id)]))
if Recommendation.objects.filter(user=svd_user, target_user__id=the_user_id, work__id=work_id).count() == 0:
Recommendation.objects.create(user=svd_user, target_user_id=the_user_id, work_id=work_id)
c += 1
elif i < TOP:
print(i, title, rating)
if c >= TOP:
break
"""print(len(connection.queries), 'queries')
for line in connection.queries:
print(line)"""
self.chrono.save('complete')
def __str__(self):
return '[SVD]'
def get_shortname(self):
return 'svd'
| agpl-3.0 |
phev8/dataset_tools | experiment_handler/time_synchronisation.py | 1 | 1444 | import os
import pandas as pd
def read_synchronisation_file(experiment_root):
filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
return pd.read_csv(filepath)
def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
"""
Convert numeric timestamps (seconds from the start of the video or posix timestamps) given in one reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time)
Parameters
----------
experiment_root: str
Root of the current experiment (to find the right synchronisation matrix)
timestamps: float or array like
timestamps to be converted
from_reference: str
name of the reference of the original timestamps
to_reference: str
name of the reference time the timestamp has to be converted to
Returns
-------
converted_timestamps: float or array like
Timestamps given in to_reference time values
"""
synchronisation_file = read_synchronisation_file(experiment_root)
offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
converted_timestamps = timestamps + offset
return converted_timestamps
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video")) | mit |