repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
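A minimal sketch of iterating rows with this schema, assuming the table is one split of a Hugging Face dataset exposed through the `datasets` library (the dataset name below is a placeholder, not the real one):
from datasets import load_dataset
ds = load_dataset("placeholder/code-corpus", split="train")  # hypothetical dataset name
for row in ds:
    # each row carries: repo_name, path, copies, size, content, license
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))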
samuel1208/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same result as scikit-learn's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_greater gives the
# same result as scikit-learn's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired by numpy 1.7, with an alteration to check
# that the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
kosklain/MitosisDetection | ImageWorker.py | 1 | 11990 | """
Created on Jun 20, 2013
This module is the core of two important parts of the project:
firstly, the feature calculation is done here; secondly, the
image segmentation is done here.
More details are given on every class/method.
@author: Bibiana and Adria
"""
from scipy import ndimage, fftpack
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from math import pi
from skimage.util import dtype
from scipy.ndimage.filters import gaussian_laplace
from skimage.color import rgb2hed
from mahotas.features import texture, zernike_moments
import pywt
from scipy.stats.stats import skew, kurtosis
from Tamura import Tamura
from scipy.stats.mstats_basic import mquantiles
"""
Constant for L resizing
"""
rangeL = (1.0/0.95047)*116.- 16.
"""
Constants for HEDAB resizing
"""
imageTest = np.array([[[255,0,0]]], dtype=np.uint8)
minH = rgb2hed(imageTest)[0][0][0]
imageTest = np.array([[[0,255,255]]], dtype=np.uint8)
maxH = rgb2hed(imageTest)[0][0][0]
rangeH = maxH-minH
imageTest = np.array([[[0,255,0]]], dtype=np.uint8)
minE = rgb2hed(imageTest)[0][0][1]
imageTest = np.array([[[255,0,255]]], dtype=np.uint8)
maxE = rgb2hed(imageTest)[0][0][1]
rangeE = maxE-minE
imageTest = np.array([[[0,0,255]]], dtype=np.uint8)
minDAB = rgb2hed(imageTest)[0][0][2]
imageTest = np.array([[[255,255,0]]], dtype=np.uint8)
maxDAB = rgb2hed(imageTest)[0][0][2]
rangeDAB = maxDAB-minDAB
"""
Base class for operating with images. It forces the user to implement
the getPatch method on the classes that inherit from it. It also has
some general purpose methods for plotting and calculating limits.
"""
class ImageWorker:
def __init__(self, image, rows=None, columns=None, convert=False):
if convert:
self.image = dtype.img_as_float(image)
else:
self.image = image
self.rows = len(image) if rows is None else rows
self.columns = len(image[0]) if columns is None else columns
def showOnlyPatches(self, centers, size):
finalImage = np.zeros(self.image.shape, dtype=np.uint8)
for center in centers:
(beginningi, beginningj, finali, finalj) = self.getLimits(center, size)
finalImage[beginningi:finali, beginningj:finalj, :] = self.image[beginningi:finali, beginningj:finalj, :]
plt.imshow(finalImage)
plt.show()
def getLimits(self, center, size):
beginningi = max(center[0] - (size - 1) / 2, 0)
beginningj = max(center[1] - (size - 1) / 2, 0)
finali = min(beginningi + size, self.rows-1)
finalj = min(beginningj + size, self.columns-1)
return (beginningi, beginningj, finali, finalj)
def getPatch(self, center, size):
raise NotImplementedError("Should have implemented this")
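# --- Illustrative sketch (not part of the original module) ---
# A hedged, minimal subclass, assuming a single-channel (grayscale) array,
# showing how the base-class contract is usually met: implement getPatch()
# and reuse getLimits() for boundary handling.
class GrayImageWorker(ImageWorker):
    def getPatch(self, center, size):
        (beginningi, beginningj, finali, finalj) = self.getLimits(center, size)
        return GrayImageWorker(self.image[beginningi:finali, beginningj:finalj],
                               finali - beginningi, finalj - beginningj)
# --- end of sketch ---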
"""
3D image (RGB, HSV, LAB, etc.) worker. It gets the needed features, and it implements
the segmentation operation with getBinaryImage(), which gets the binary image that contains
True for those pixels that were below all the thresholds and False for the ones
which were not.
"""
class RGBImageWorker(ImageWorker):
"""
The only1D argument is used when we are calculating the features of only those pixels
that pass all the filters.
"""
def getGeneralStatistics(self, hara=False, zern=False, tamura=False, only1D=None):
generalStatistics = []
if self.rows == 1 and self.columns == 1:
for index in range(3):
generalStatistics.append(self.image[0,0,index])
return generalStatistics
if not only1D is None:
im = only1D
generalStatistics.extend(self._calculateStatistics(im, haralick=hara, zernike=zern))
fourierTransform = np.abs(fftpack.fft2(im)) #fourierTransform
generalStatistics.extend(self._calculateStatistics(fourierTransform))
waveletTransform = pywt.dwt2(im, 'sym5')[0]
generalStatistics.extend(self._calculateStatistics(waveletTransform))
waveletFourierTransform = pywt.dwt2(fourierTransform, 'sym5')[0]
generalStatistics.extend(self._calculateStatistics(waveletFourierTransform))
if tamura:
generalStatistics.extend(self.get3Dstatistics(tamura=True))
return generalStatistics
for index in range(3):
im = self.image[:, :, index]
generalStatistics.extend(self._calculateStatistics(im, haralick=hara, zernike=zern))
fourierTransform = np.abs(fftpack.fft2(im)) #fourierTransform
generalStatistics.extend(self._calculateStatistics(fourierTransform))
waveletTransform = pywt.dwt2(im, 'sym5')[0]
generalStatistics.extend(self._calculateStatistics(waveletTransform))
waveletFourierTransform = pywt.dwt2(fourierTransform, 'sym5')[0]
generalStatistics.extend(self._calculateStatistics(waveletFourierTransform))
if tamura:
generalStatistics.extend(self.get3Dstatistics(tamura=True))
return generalStatistics
"""
Features calculated with only one component (for example, R from RGB).
"""
def _calculateStatistics(self, img, haralick=False, zernike=False):
result = []
#Three quantiles (mquantiles defaults: 0.25, 0.5, 0.75)
result.extend(mquantiles(img))
#First four moments
result.extend([img.mean(), img.var(), skew(img,axis=None), kurtosis(img,axis=None)])
#Haralick features
if haralick:
integerImage = dtype.img_as_ubyte(img)
result.extend(texture.haralick(integerImage).flatten())
#Zernike moments
if zernike:
result.extend(zernike_moments(img, int(self.rows)/2 + 1))
return result
"""
Features calculated with the whole image at once.
"""
def get3Dstatistics(self, tamura=False):
result = []
#Tamura features
if tamura:
#result.append(Tamura.coarseness(self.image))  # coarseness may fail on some images, so it is skipped
result.append(Tamura.contrast(self.image))
result.extend(Tamura.directionality(self.image))
return result
"""
Plot the histogram of a given channel.
"""
def plotHistogram(self, img):
hist, bin_edges = np.histogram(img, bins=60)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
plt.plot(bin_centers, hist, lw=2)
plt.show()
"""
Filter an image based on a percentile.
"""
def filterImage(self, im, threshold):
if self.ploting:
self.plotHistogram(im)
return im < np.percentile(im,threshold)
"""
V from HSV color space
"""
def getV(self):
preV = np.asanyarray(self.image)
preV = dtype.img_as_float(preV)
return preV.max(-1)
"""
Y from XYZ color space
"""
def getY(self):
arr = np.asanyarray(self.image)
arr = dtype.img_as_float(arr)
mask = arr > 0.04045
arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)
arr[~mask] /= 12.92
Y = arr[:, :, 0] * 0.2126 + arr[:, :, 1] * 0.7152 + arr[:, :, 2] * 0.0722
return Y
"""
It returns L from LAB color space and L from LUV color space
"""
def getL(self):
Y = self.getY()
Y = Y / 0.95047
mask = Y > 0.008856
Y2 = np.power(Y, 1. / 3.)
Y[mask] = Y2[mask]
Y[~mask] = 7.787 * Y[~mask] + 16. / 116.
L = (116. * Y) - 16.
L2 = (116. * Y2) - 16.
return (L, L2)
"""
It returns the thresholded (segmented) image (both the morphologically closed and
the raw binary versions), as well as the HEDAB image and the BRVL image.
"""
def getBinaryImage(self):
self.ploting = False
HEDAB = rgb2hed(self.image)
R = self.image[:, :, 0]
G = self.image[:, :, 1]
B = self.image[:, :, 2]
H = HEDAB[:,:,0]
E = HEDAB[:,:,1]
DAB = HEDAB[:,:,2]
BR = B*2/((1+R+G)*(1+B+R+G)) #Blue-ratio image
V = self.getV() #From HSV
(L, L2) = self.getL() #From CIELAB and CIELUV
BRSmoothed = ndimage.gaussian_filter(BR,1)
LSmoothed = ndimage.gaussian_filter(L,1)
VSmoothed = ndimage.gaussian_filter(V,1)
HSmoothed = ndimage.gaussian_filter(H,1)
ESmoothed = ndimage.gaussian_filter(E,1)
RSmoothed = ndimage.gaussian_filter(R,1)
DABSmoothed = ndimage.gaussian_filter(DAB,1)
imLLog = self.filterImage(gaussian_laplace(LSmoothed,9), 85) == False
imVLog = self.filterImage(gaussian_laplace(VSmoothed, 9), 85) == False
imELog = self.filterImage(gaussian_laplace(ESmoothed,9), 84) == False
imRLog = self.filterImage(gaussian_laplace(RSmoothed, 9), 84) == False
imDABLog = self.filterImage(gaussian_laplace(DABSmoothed,9), 50)
imHLog = self.filterImage(gaussian_laplace(HSmoothed,9), 8)
imLog = self.filterImage(gaussian_laplace(BRSmoothed,9), 9)
imR = self.filterImage(R, 2.5)
imB = self.filterImage(B, 10.5)
imV = self.filterImage(V, 6.5)
imL = self.filterImage(L, 2.5)
imL2 = self.filterImage(L2, 2.5)
imE = self.filterImage(E, 18)
imH = self.filterImage(H, 95) == False
imDAB = self.filterImage(DAB, 55) == False
imBR = self.filterImage(BR, 63) == False
binaryImg = imR & imV & imB & imL & imL2 & imE & imH & imDAB & imLog & imBR & imLLog & imVLog & imELog & imHLog & imRLog & imDABLog
openImg = ndimage.binary_opening(binaryImg, iterations=2)
closedImg = ndimage.binary_closing(openImg, iterations=8)
if self.ploting:
plt.imshow(self.image)
plt.show()
plt.imshow(imR)
plt.show()
plt.imshow(imV)
plt.show()
plt.imshow(imB)
plt.show()
plt.imshow(imL)
plt.show()
plt.imshow(closedImg)
plt.show()
BRVL = np.zeros(self.image.shape)
BRVL[:,:,0] = BR
BRVL[:,:,1] = V
BRVL[:,:,2] = L/rangeL
#Resize HEDAB, from 0 to 1.
HEDAB[:,:,0] = (H - minH)/rangeH
HEDAB[:,:,1] = (E - minE)/rangeE
HEDAB[:,:,2] = (DAB - minDAB)/rangeDAB
return (BinaryImageWorker(closedImg, self.rows, self.columns),
RGBImageWorker(HEDAB, self.rows, self.columns),
RGBImageWorker(BRVL, self.rows, self.columns), BinaryImageWorker(binaryImg, self.rows, self.columns))
def getPatch(self, center, size):
(beginningi, beginningj, finali, finalj) = self.getLimits(center, size)
subImageWorker = RGBImageWorker(self.image[beginningi:finali, beginningj:finalj, :], finali - beginningi, finalj - beginningj)
return subImageWorker
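# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: compute the channel statistics of a small synthetic RGB patch.
# The 32x32 random image is an assumption purely for illustration.
def _demo_rgb_statistics():
    demo = np.random.randint(0, 255, (32, 32, 3)).astype(np.uint8)
    worker = RGBImageWorker(demo, convert=True)
    patch = worker.getPatch((16, 16), 15)
    # statistics of the raw channels plus their Fourier and wavelet transforms
    return patch.getGeneralStatistics(hara=False, zern=False, tamura=False)
# --- end of sketch ---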
"""
It gets the binary characteristics of an image, such as area, perimeter or roundness.
It also gets the centers of all the blobs from the binary thresholded image.
"""
class BinaryImageWorker(ImageWorker):
def getPatch(self, center, size):
(beginningi, beginningj, finali, finalj) = self.getLimits(center, size)
subImageWorker = BinaryImageWorker(self.image[beginningi:finali, beginningj:finalj], finali - beginningi, finalj - beginningj)
return subImageWorker
def getGeneralStatistics(self):
area = np.sum(self.image)
perimeterArray = [len(x) for x in measure.find_contours(self.image, 0.5)]
perimeter = max(perimeterArray) if len(perimeterArray) != 0 else 0
roundness = 4 * area * pi / (perimeter * perimeter) if perimeter != 0 else 0
finalStatistics = [area, perimeter, roundness, len(self.getCenters())]
return finalStatistics
def getCenters(self):
lbl = ndimage.label(self.image)
centers = ndimage.measurements.center_of_mass(self.image, lbl[0], range(1, lbl[1] + 1))
centers = np.round(centers)
return centers | gpl-2.0 |
nelango/ViralityAnalysis | model/lib/pandas/io/common.py | 9 | 14607 | """Common IO api utilities"""
import sys
import os
import csv
import codecs
import zipfile
from contextlib import contextmanager, closing
from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
from pandas.core.common import pprint_thing, is_number
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
try:
from py.path import local as LocalPath
_PY_PATH_INSTALLED = True
except:
_PY_PATH_INSTALLED = False
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
import urllib.parse as compat_parse
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError
from httplib import HTTPException
from contextlib import contextmanager, closing
from functools import wraps
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class PerformanceWarning(Warning):
pass
class DtypeWarning(Warning):
pass
try:
from boto.s3 import key
class BotoFileLikeReader(key.Key):
"""boto Key modified to be more file-like
This modification of the boto Key will read through a supplied
S3 key once, then stop. The unmodified boto Key object will repeatedly
cycle through a file in S3: after reaching the end of the file,
boto will close the file. Then the next call to `read` or `next` will
re-open the file and start reading from the beginning.
Also adds a `readline` function which will split the returned
values by the `\n` character.
"""
def __init__(self, *args, **kwargs):
encoding = kwargs.pop("encoding", None) # Python 2 compat
super(BotoFileLikeReader, self).__init__(*args, **kwargs)
self.finished_read = False # Add a flag to mark the end of the read.
self.buffer = ""
self.lines = []
if encoding is None and compat.PY3:
encoding = "utf-8"
self.encoding = encoding
self.lines = []
def next(self):
return self.readline()
__next__ = next
def read(self, *args, **kwargs):
if self.finished_read:
return b'' if compat.PY3 else ''
return super(BotoFileLikeReader, self).read(*args, **kwargs)
def close(self, *args, **kwargs):
self.finished_read = True
return super(BotoFileLikeReader, self).close(*args, **kwargs)
def seekable(self):
"""Needed for reading by bz2"""
return False
def readline(self):
"""Split the contents of the Key by '\n' characters."""
if self.lines:
retval = self.lines[0]
self.lines = self.lines[1:]
return retval
if self.finished_read:
if self.buffer:
retval, self.buffer = self.buffer, ""
return retval
else:
raise StopIteration
if self.encoding:
self.buffer = "{}{}".format(self.buffer, self.read(8192).decode(self.encoding))
else:
self.buffer = "{}{}".format(self.buffer, self.read(8192))
split_buffer = self.buffer.split("\n")
self.lines.extend(split_buffer[:-1])
self.buffer = split_buffer[-1]
return self.readline()
except ImportError:
# boto is only needed for reading from S3.
pass
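# A hedged walk-through of BotoFileLikeReader.readline above (hypothetical data):
# with self.buffer == "" and a first read(8192) returning b"a,1\nb,2\nc", the split
# on "\n" yields ["a,1", "b,2", "c"]; "a,1" and "b,2" are queued in self.lines and
# the partial "c" stays in self.buffer until the next chunk completes it.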
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
except:
return False
def _is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
except:
return False
def maybe_read_encoded_stream(reader, encoding=None, compression=None):
"""read an encoded stream from the reader and transform the bytes to
unicode if required based on the encoding
Parameters
----------
reader : a streamable file-like object
encoding : optional, the encoding to attempt to read
Returns
-------
a tuple of (a stream of decoded bytes, the encoding which was used)
"""
if compat.PY3 or encoding is not None: # pragma: no cover
if encoding:
errors = 'strict'
else:
errors = 'replace'
encoding = 'utf-8'
if compression == 'gzip':
reader = BytesIO(reader.read())
else:
reader = StringIO(reader.read().decode(encoding, errors))
else:
if compression == 'gzip':
reader = BytesIO(reader.read())
encoding = None
return reader, encoding
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, string_types):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
def _stringify_path(filepath_or_buffer):
"""Return the argument coerced to a string if it was a pathlib.Path
or a py.path.local
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : the string version of the input path
"""
if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
return text_type(filepath_or_buffer)
if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
return filepath_or_buffer.strpath
return filepath_or_buffer
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
"""
If the filepath_or_buffer is a url, translate it and return the buffer;
pass it through otherwise.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding, the compression
"""
if _is_url(filepath_or_buffer):
req = _urlopen(str(filepath_or_buffer))
if compression == 'infer':
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
compression = 'gzip'
else:
compression = None
# tack the compression onto the tuple returned by the function
to_return = list(maybe_read_encoded_stream(req, encoding, compression)) + \
[compression]
return tuple(to_return)
if _is_s3_url(filepath_or_buffer):
try:
import boto
except:
raise ImportError("boto is required to handle s3 files")
# Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# are environment variables
parsed_url = parse_url(filepath_or_buffer)
try:
conn = boto.connect_s3()
except boto.exception.NoAuthHandlerFound:
conn = boto.connect_s3(anon=True)
b = conn.get_bucket(parsed_url.netloc, validate=False)
if compat.PY2 and (compression == 'gzip' or
(compression == 'infer' and
filepath_or_buffer.endswith(".gz"))):
k = boto.s3.key.Key(b, parsed_url.path)
filepath_or_buffer = BytesIO(k.get_contents_as_string(
encoding=encoding))
else:
k = BotoFileLikeReader(b, parsed_url.path, encoding=encoding)
k.open('r') # Expose read errors immediately
filepath_or_buffer = k
return filepath_or_buffer, None, compression
# It is a pathlib.Path/py.path.local or string
filepath_or_buffer = _stringify_path(filepath_or_buffer)
return _expand_user(filepath_or_buffer), None, compression
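# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of the 3-tuple contract documented above; "~/data.csv" is a
# placeholder path, not a file this module ships with.
def _demo_get_filepath_or_buffer():
    # a plain local path is only ~-expanded here, never opened
    path, encoding, compression = get_filepath_or_buffer("~/data.csv")
    # a URL would instead be opened and a readable buffer returned, e.g.
    # buf, encoding, compression = get_filepath_or_buffer("http://example.com/x.csv")
    return path, encoding, compression
# --- end of sketch ---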
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
# ZipFile is not a context manager for <= 2.6
# must be tuple index here since 2.6 doesn't use namedtuple for version_info
if sys.version_info[1] <= 6:
@contextmanager
def ZipFile(*args, **kwargs):
with closing(zipfile.ZipFile(*args, **kwargs)) as zf:
yield zf
else:
ZipFile = zipfile.ZipFile
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, mode)
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, mode)
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
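# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: open a gzip-compressed file for reading; "data.csv.gz" is a
# placeholder path.
def _demo_get_handle():
    f = _get_handle("data.csv.gz", "rb", encoding=None, compression="gzip")
    try:
        return f.read()
    finally:
        f.close()
# --- end of sketch ---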
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode("utf-8")
def readline(self):
return self.reader.readline().encode("utf-8")
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0) | mit |
crichardson17/starburst_atlas | SFH_comparison/data/Geneva_cont_Rot/Geneva_cont_Rot_5/fullgrid/peaks_reader.py | 1 | 5057 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
headerloc = "/Users/helen/Documents/Elon/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#Data file names from the source directory are constructed here. The default source directory is the working directory.
numFiles = 3 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print ("Files names constructed")
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
#To fix when hdens > 10
#many of my grids were run with hdens up to 12, so we need to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 6.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
# ---------------------------------------------------
#there are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
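# Worked example with hypothetical values: if Emissionlines[i,j] = 2.0 and
# Emissionlines[i,57] = 1.0 (the 4860 reference line), the stored value is
# log10(4860*2.0/1.0) ~ 3.99; ratios whose log is not positive are left at 0.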
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks_Geneva_cont_5', max_values, delimiter='\t')
| gpl-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/metrics/tests/test_ranking.py | 6 | 41689 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
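# Worked example (hypothetical scores): for y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], _auc counts 3 of the 4 positive/negative
# pairs as correctly ordered (0.35 < 0.4 is the one inversion), giving 0.75.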
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various cases
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1; ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 both end up with rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at or above
            # this one (i.e. with a smaller or equal rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
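# Illustrative sketch (not part of the original test suite): for one sample,
# LRAP is the mean, over its relevant labels, of the fraction of relevant
# labels ranked at or above that label.
def _lrap_by_hand_example():
    # Hypothetical sample: label 0 has rank 1 (precision 1 / 1) and label 2
    # has rank 3 with two relevant labels at or above it (precision 2 / 3),
    # hence a sample score of (1 + 2 / 3) / 2.
    expected = (1 + 2 / 3) / 2
    assert_almost_equal(_my_lrap([[1, 0, 1]], [[0.75, 0.5, 0.25]]), expected)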
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
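# Illustrative sketch (not part of the original test suite): coverage_error
# counts how far down the score-sorted labels one must go to cover every
# relevant label, and ties are broken pessimistically, i.e. a relevant label
# tied with irrelevant ones is assumed to come after them.
def _coverage_tie_example():
    # The single relevant label is tied with an irrelevant one, so both
    # labels must be covered and the error is 2 rather than 1.
    assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)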
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
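# Illustrative sketch (not part of the original test suite): the ranking loss
# is the fraction of (relevant, irrelevant) label pairs that are incorrectly
# ordered, i.e. pairs where the irrelevant label receives the higher score.
def _ranking_loss_by_hand_example():
    # Label 1 is relevant; it beats label 0 (0.5 > 0.25) but loses to the
    # irrelevant label 2 (0.5 < 0.75), so 1 of the 2 pairs is mis-ordered.
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
                        1 / 2)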
| gpl-2.0 |
rkmaddox/mne-python | mne/viz/backends/_notebook.py | 4 | 12670 | """Notebook implementation of _Renderer and GUI."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager, nullcontext
from distutils.version import LooseVersion
import pyvista
from IPython.display import display
from ipywidgets import (Button, Dropdown, FloatSlider, FloatText, HBox,
IntSlider, IntText, Text, VBox, IntProgress, Play,
jsdlink)
from ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar,
_AbstractStatusBar, _AbstractLayout, _AbstractWidget,
_AbstractWindow, _AbstractMplCanvas, _AbstractPlayback,
_AbstractBrainMplCanvas, _AbstractMplInterface)
from ._pyvista import _PyVistaRenderer, _close_all, _set_3d_view, _set_3d_title # noqa: F401,E501, analysis:ignore
class _IpyLayout(_AbstractLayout):
def _layout_initialize(self, max_width):
self._layout_max_width = max_width
def _layout_add_widget(self, layout, widget, stretch=0):
widget.layout.margin = "2px 0px 2px 0px"
if not isinstance(widget, Play):
widget.layout.min_width = "0px"
children = list(layout.children)
children.append(widget)
layout.children = tuple(children)
# Fix columns
if self._layout_max_width is not None and isinstance(widget, HBox):
children = widget.children
width = int(self._layout_max_width / len(children))
for child in children:
child.layout.width = f"{width}px"
class _IpyDock(_AbstractDock, _IpyLayout):
def _dock_initialize(self, window=None):
self._dock_width = 300
self._dock = self._dock_layout = VBox()
self._dock.layout.width = f"{self._dock_width}px"
self._layout_initialize(self._dock_width)
def _dock_finalize(self):
pass
def _dock_show(self):
self._dock_layout.layout.visibility = "visible"
def _dock_hide(self):
self._dock_layout.layout.visibility = "hidden"
def _dock_add_stretch(self, layout):
pass
def _dock_add_layout(self, vertical=True):
return VBox() if vertical else HBox()
def _dock_add_label(self, value, align=False, layout=None):
layout = self._dock_layout if layout is None else layout
widget = Text(value=value, disabled=True)
self._layout_add_widget(layout, widget)
return _IpyWidget(widget)
def _dock_add_button(self, name, callback, layout=None):
widget = Button(description=name)
widget.on_click(lambda x: callback())
self._layout_add_widget(layout, widget)
return _IpyWidget(widget)
def _dock_named_layout(self, name, layout, compact):
layout = self._dock_layout if layout is None else layout
if name is not None:
hlayout = self._dock_add_layout(not compact)
self._dock_add_label(
value=name, align=not compact, layout=hlayout)
self._layout_add_widget(layout, hlayout)
layout = hlayout
return layout
def _dock_add_slider(self, name, value, rng, callback,
compact=True, double=False, layout=None):
layout = self._dock_named_layout(name, layout, compact)
klass = FloatSlider if double else IntSlider
widget = klass(
value=value,
min=rng[0],
max=rng[1],
readout=False,
)
widget.observe(_generate_callback(callback), names='value')
self._layout_add_widget(layout, widget)
return _IpyWidget(widget)
def _dock_add_spin_box(self, name, value, rng, callback,
compact=True, double=True, layout=None):
layout = self._dock_named_layout(name, layout, compact)
klass = FloatText if double else IntText
widget = klass(
value=value,
min=rng[0],
max=rng[1],
readout=False,
)
widget.observe(_generate_callback(callback), names='value')
self._layout_add_widget(layout, widget)
return _IpyWidget(widget)
def _dock_add_combo_box(self, name, value, rng,
callback, compact=True, layout=None):
layout = self._dock_named_layout(name, layout, compact)
widget = Dropdown(
value=value,
options=rng,
)
widget.observe(_generate_callback(callback), names='value')
self._layout_add_widget(layout, widget)
return _IpyWidget(widget)
def _dock_add_group_box(self, name, layout=None):
layout = self._dock_layout if layout is None else layout
hlayout = VBox()
self._layout_add_widget(layout, hlayout)
return hlayout
def _generate_callback(callback, to_float=False):
def func(data):
value = data["new"] if "new" in data else data["old"]
callback(float(value) if to_float else value)
return func
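# Illustrative sketch (not part of the original module): _generate_callback
# adapts a plain callback(value) function to the ipywidgets ``observe``
# protocol, which delivers a change dict with "new"/"old" entries. The
# hypothetical helper below shows the intended wiring.
def _example_observe_wiring():
    slider = IntSlider(value=0, min=0, max=10)
    values = []
    # values.append only ever sees the plain value, not the change dict
    slider.observe(_generate_callback(values.append), names='value')
    return slider, values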
class _IpyToolBar(_AbstractToolBar, _IpyLayout):
def _tool_bar_load_icons(self):
self.icons = dict()
self.icons["help"] = "question"
self.icons["play"] = None
self.icons["pause"] = None
self.icons["reset"] = "history"
self.icons["scale"] = "magic"
self.icons["clear"] = "trash"
self.icons["movie"] = "video-camera"
self.icons["restore"] = "replay"
self.icons["screenshot"] = "camera"
self.icons["visibility_on"] = "eye"
self.icons["visibility_off"] = "eye"
def _tool_bar_initialize(self, name="default", window=None):
self.actions = dict()
self._tool_bar = self._tool_bar_layout = HBox()
self._layout_initialize(None)
def _tool_bar_add_button(self, name, desc, func, icon_name=None,
shortcut=None):
icon_name = name if icon_name is None else icon_name
icon = self.icons[icon_name]
if icon is None:
return
widget = Button(tooltip=desc, icon=icon)
widget.on_click(lambda x: func())
self._layout_add_widget(self._tool_bar_layout, widget)
self.actions[name] = widget
def _tool_bar_update_button_icon(self, name, icon_name):
self.actions[name].icon = self.icons[icon_name]
def _tool_bar_add_text(self, name, value, placeholder):
widget = Text(value=value, placeholder=placeholder)
self._layout_add_widget(self._tool_bar_layout, widget)
self.actions[name] = widget
def _tool_bar_add_spacer(self):
pass
def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):
def callback():
fname = self.actions[f"{name}_field"].value
func(None if len(fname) == 0 else fname)
self._tool_bar_add_text(
name=f"{name}_field",
value=None,
placeholder="Type a file name",
)
self._tool_bar_add_button(
name=name,
desc=desc,
func=callback,
)
def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):
widget = Play(interval=500)
self._layout_add_widget(self._tool_bar_layout, widget)
self.actions[name] = widget
return _IpyWidget(widget)
def _tool_bar_set_theme(self, theme):
pass
class _IpyMenuBar(_AbstractMenuBar):
def _menu_initialize(self, window=None):
pass
def _menu_add_submenu(self, name, desc):
pass
def _menu_add_button(self, menu_name, name, desc, func):
pass
class _IpyStatusBar(_AbstractStatusBar, _IpyLayout):
def _status_bar_initialize(self, window=None):
self._status_bar = self._status_bar_layout = HBox()
self._layout_initialize(None)
def _status_bar_add_label(self, value, stretch=0):
widget = Text(value=value, disabled=True)
self._layout_add_widget(self._status_bar_layout, widget)
return _IpyWidget(widget)
def _status_bar_add_progress_bar(self, stretch=0):
widget = IntProgress()
self._layout_add_widget(self._status_bar_layout, widget)
return _IpyWidget(widget)
def _status_bar_update(self):
pass
class _IpyPlayback(_AbstractPlayback):
def _playback_initialize(self, func, timeout, value, rng,
time_widget, play_widget):
play = play_widget._widget
play.min = rng[0]
play.max = rng[1]
play.value = value
slider = time_widget._widget
jsdlink((play, 'value'), (slider, 'value'))
jsdlink((slider, 'value'), (play, 'value'))
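# Illustrative sketch (not part of the original module): the playback wiring
# above links a Play widget and a slider on the browser side with ``jsdlink``,
# so stepping the player moves the slider (and vice versa) without a Python
# round-trip. A minimal standalone version of that pattern:
def _example_play_slider_link():
    play = Play(interval=500, min=0, max=10, value=0)
    slider = IntSlider(min=0, max=10, value=0)
    jsdlink((play, 'value'), (slider, 'value'))
    jsdlink((slider, 'value'), (play, 'value'))
    return HBox([play, slider])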
class _IpyMplInterface(_AbstractMplInterface):
def _mpl_initialize(self):
from matplotlib.backends.backend_nbagg import (FigureCanvasNbAgg,
FigureManager)
self.canvas = FigureCanvasNbAgg(self.fig)
self.manager = FigureManager(self.canvas, 0)
class _IpyMplCanvas(_AbstractMplCanvas, _IpyMplInterface):
def __init__(self, width, height, dpi):
super().__init__(width, height, dpi)
self._mpl_initialize()
class _IpyBrainMplCanvas(_AbstractBrainMplCanvas, _IpyMplInterface):
def __init__(self, brain, width, height, dpi):
super().__init__(brain, width, height, dpi)
self._mpl_initialize()
self._connect()
class _IpyWindow(_AbstractWindow):
def _window_close_connect(self, func):
pass
def _window_get_dpi(self):
return 96
def _window_get_size(self):
return self.figure.plotter.window_size
def _window_get_simple_canvas(self, width, height, dpi):
return _IpyMplCanvas(width, height, dpi)
def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
separate_canvas):
w, h = self._window_get_mplcanvas_size(interactor_fraction)
self._interactor_fraction = interactor_fraction
self._show_traces = show_traces
self._separate_canvas = separate_canvas
self._mplcanvas = _IpyBrainMplCanvas(
brain, w, h, self._window_get_dpi())
return self._mplcanvas
def _window_adjust_mplcanvas_layout(self):
pass
def _window_get_cursor(self):
pass
def _window_set_cursor(self, cursor):
pass
def _window_new_cursor(self, name):
pass
@contextmanager
def _window_ensure_minimum_sizes(self):
yield
def _window_set_theme(self, theme):
pass
class _IpyWidget(_AbstractWidget):
def set_value(self, value):
self._widget.value = value
def get_value(self):
return self._widget.value
def set_range(self, rng):
self._widget.min = rng[0]
self._widget.max = rng[1]
def show(self):
self._widget.layout.visibility = "visible"
def hide(self):
self._widget.layout.visibility = "hidden"
def update(self, repaint=True):
pass
class _Renderer(_PyVistaRenderer, _IpyDock, _IpyToolBar, _IpyMenuBar,
_IpyStatusBar, _IpyWindow, _IpyPlayback):
def __init__(self, *args, **kwargs):
self._dock = None
self._tool_bar = None
self._status_bar = None
kwargs["notebook"] = True
super().__init__(*args, **kwargs)
def _update(self):
if self.figure.display is not None:
self.figure.display.update_canvas()
def _create_default_tool_bar(self):
self._tool_bar_load_icons()
self._tool_bar_initialize()
self._tool_bar_add_file_button(
name="screenshot",
desc="Take a screenshot",
func=self.screenshot,
)
def show(self):
# default tool bar
if self._tool_bar is None:
self._create_default_tool_bar()
display(self._tool_bar)
# viewer
if LooseVersion(pyvista.__version__) < LooseVersion('0.30'):
viewer = self.plotter.show(
use_ipyvtk=True, return_viewer=True)
else: # pyvista>=0.30.0
viewer = self.plotter.show(
jupyter_backend="ipyvtklink", return_viewer=True)
viewer.layout.width = None # unlock the fixed layout
# main widget
if self._dock is None:
main_widget = viewer
else:
main_widget = HBox([self._dock, viewer])
display(main_widget)
self.figure.display = viewer
# status bar
if self._status_bar is not None:
display(self._status_bar)
return self.scene()
_testing_context = nullcontext
| bsd-3-clause |
ddasilva/numpy | numpy/lib/npyio.py | 3 | 73866 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
        of the NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # normalize case so hexadecimal prefixes like '0X' are detected
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return asbytes
else:
return asstr
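# Illustrative sketch (not part of the original module): _getconv maps a dtype
# to a parser for a single text field. Integer dtypes go through float() first
# so values such as b"3.0" are accepted, and float fields also understand the
# hexadecimal notation produced by float.hex().
def _example_getconv_usage():
    int_conv = _getconv(np.dtype(np.int32))
    float_conv = _getconv(np.dtype(np.float64))
    assert int_conv(b"3.0") == 3
    assert float_conv(b"0x1p-1") == 0.5
    return int_conv, float_conv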
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionadded:: 1.11.0
            Also, when a single column has to be read, it is possible to use
            an integer instead of a tuple. E.g. ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
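    # Illustrative note (assumed example, not from the original source): for a
    # structured dtype like np.dtype([('x', float), ('pos', float, (2,))]),
    # flatten_dtype returns three scalar dtypes with packing info
    # [(1, None), (2, list)], and pack_items then reassembles the three
    # converted tokens into the nested row (x, [p0, p1]) expected by np.array.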
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names get an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
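    # Illustrative note (assumed input): after this normalisation
    # `missing_values` holds one list per column.  For instance, with three
    # columns and a plain string "N/A" from the user, every column ends up as
    # [b'', b'N/A'] under Python 3 -- the empty entry means empty fields are
    # always treated as missing -- while a dict keyed by name or index only
    # extends the matching column's list.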
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
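# Illustrative usage sketch of the function above (hypothetical helper; the
# column names and the -999 fill value are arbitrary choices, not defaults of
# this module).  It shows `filling_values` working together with `usemask`.
def _genfromtxt_missing_data_example():
    import io
    data = io.BytesIO(b"1,2,3\n4,,6")
    # The empty field in the second row is reported as masked and filled
    # with -999 in the underlying data.
    return genfromtxt(data, delimiter=",", names=("a", "b", "c"),
                      filling_values=-999, usemask=True)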
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
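# Illustrative sketch of the convenience wrapper above (hypothetical helper;
# the field names are arbitrary).  Because of the ``case_sensitive="lower"``
# default, the header "Name,Score" becomes the attributes ``name`` and
# ``score`` on the returned record array.
def _recfromcsv_example():
    import io
    rec = recfromcsv(io.BytesIO(b"Name,Score\nalice,10\nbob,20"))
    # rec.name -> string column, rec.score -> integer column
    return rec.name, rec.score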
| bsd-3-clause |
agiliq/django-graphos | graphos/tests.py | 1 | 38867 | from django.test import TestCase
from pymongo.errors import CollectionInvalid
from .sources.base import BaseDataSource
from .sources.simple import SimpleDataSource
from .sources.csv_file import CSVDataSource
from .sources.model import ModelDataSource
from .sources.mongo import MongoDBDataSource
from .renderers import base, flot, gchart, yui, matplotlib_renderer, highcharts
from .exceptions import GraphosException
from .utils import DEFAULT_HEIGHT, DEFAULT_WIDTH, get_default_options, get_db
from demo.models import Account
import os
import json
current_path = os.path.dirname(os.path.abspath(__file__))
class TestSources(TestCase):
def test_base_data_source(self):
data_source = BaseDataSource()
self.assertTrue(hasattr(data_source, "get_data"))
self.assertRaises(GraphosException, data_source.get_data,)
self.assertTrue(hasattr(data_source, "get_header"))
self.assertRaises(GraphosException, data_source.get_header)
self.assertTrue(hasattr(data_source, "get_first_column"))
self.assertRaises(GraphosException, data_source.get_first_column)
def test_simple_data_source(self):
data = [
['Year', 'Sales', 'Expenses'],
['2004', 1000, 400],
['2005', 1170, 460],
['2006', 660, 1120],
['2007', 1030, 540]
]
data_source = SimpleDataSource(data)
self.assertEqual(data_source.get_data(), data)
self.assertEqual(data_source.get_header(),
['Year', 'Sales', 'Expenses'])
self.assertEqual(data_source.get_first_column(),
['2004', '2005', '2006', '2007'])
def test_csv_data_source(self):
data = [
['Year', 'Sales', 'Expense'],
['2006', '1000', '400'],
['2007', '1170', '460'],
['2008', '660', '1120'],
['2009', '1030', '540']
]
csv_file = open(os.path.join(current_path, "test_data/accounts.csv"),
"r")
data_source = CSVDataSource(csv_file)
self.assertEqual(data, data_source.get_data())
self.assertEqual(data_source.get_header(),
['Year', 'Sales', 'Expense'])
self.assertEqual(data_source.get_first_column(),
['2006', '2007', '2008', '2009'])
def test_model_data_source(self):
data = [
['year', 'sales', 'expenses'],
[u'2004', 1000, 400],
[u'2005', 1170, 460],
[u'2006', 660, 1120],
[u'2007', 1030, 540]
]
        # Create some rows
Account.objects.create(year="2004", sales=1000,
expenses=400, ceo="Welch")
Account.objects.create(year="2005", sales=1170,
expenses=460, ceo="Jobs")
Account.objects.create(year="2006", sales=660,
expenses=1120, ceo="Page")
Account.objects.create(year="2007", sales=1030,
expenses=540, ceo="Welch")
query_set = Account.objects.all()
data_source = ModelDataSource(query_set, ['year', 'sales', 'expenses'])
self.assertEqual(data, data_source.get_data())
self.assertEqual(data_source.get_header(),
['year', 'sales', 'expenses'])
self.assertEqual(data_source.get_first_column(),
['2004', '2005', '2006', '2007'])
def get_mongodb_test_db(db_name, collection_name):
cur_dir = os.path.dirname(os.path.realpath(__file__))
test_data_file = open(cur_dir + '/test_data/mongodb/test_zips.json')
db = get_db(db_name)
try:
db.create_collection(collection_name)
except CollectionInvalid:
pass
for line in test_data_file:
doc = json.loads(line)
db[collection_name].save(doc)
test_data_file.close()
return db
class TestMongoDBSource(TestCase):
def setUp(self):
db_name = "test_db"
collection_name = "zips"
self.db = get_mongodb_test_db(db_name, collection_name)
self.collection = self.db[collection_name]
self.cursor = self.collection.find()
self.fields = ['_id', 'pop']
self.data = [['_id', 'pop'], ['35004', 6055], ['35005', 10616],
['35006', 3205], ['35007', 14218], ['35010', 19942],
['35014', 3062], ['35016', 13650], ['35019', 1781],
['35020', 40549], ['35023', 39677], ['35031', 9058],
['35033', 3448], ['35034', 3791], ['35035', 1282],
['35040', 4675], ['35042', 4902], ['35043', 4781],
['35044', 7985], ['35045', 13990], ['35049', '']]
self.data_source = MongoDBDataSource(cursor=self.cursor,
fields=self.fields)
def test_data_source(self):
self.assertTrue(hasattr(self.data_source, 'get_data'))
self.assertTrue(hasattr(self.data_source, 'get_header'))
self.assertTrue(hasattr(self.data_source, 'get_first_column'))
self.assertEqual(self.data, self.data_source.get_data())
self.assertEqual(self.fields, self.data_source.get_header())
self.assertEqual(
[el[0] for el in self.data[1:]],
self.data_source.get_first_column()
)
def tearDown(self):
self.db.drop_collection(self.collection.name)
class TestBaseRenderer(TestCase):
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.options = {"title": "Sales and Expences Graph"}
self.default_options = {'title': 'Chart'}
self.empty_options = {}
self.data_source = SimpleDataSource(data)
self.data = data
self.html_id = 'base_chart'
self.template = 'graphos/as_html.html'
self.header = data[0]
def test_base_chart(self):
chart = base.BaseChart(data_source=self.data_source,
options=self.options,
html_id=self.html_id)
empty_options_chart = base.BaseChart(data_source=self.data_source,
options=self.empty_options)
self.assertTrue(hasattr(chart, "width"))
self.assertEqual(DEFAULT_WIDTH, chart.width)
self.assertTrue(hasattr(chart, "height"))
self.assertEqual(DEFAULT_HEIGHT, chart.height)
self.assertTrue(hasattr(chart, "header"))
self.assertEqual(self.header, chart.header)
self.assertTrue(hasattr(chart, "get_data"))
self.assertEqual(self.data, chart.get_data())
self.assertTrue(hasattr(chart, "get_data_json"))
self.assertEqual(json.dumps(self.data), chart.get_data_json())
self.assertTrue(hasattr(chart, "get_options"))
self.assertEqual(self.options, chart.get_options())
self.assertEqual(self.default_options,
empty_options_chart.get_options())
self.assertTrue(hasattr(chart, "get_options_json"))
self.assertEqual(json.dumps(self.options),
chart.get_options_json())
self.assertTrue(hasattr(chart, "get_template"))
self.assertEqual(self.template, chart.get_template())
self.assertTrue(hasattr(chart, "get_html_template"))
self.assertRaises(GraphosException, chart.get_html_template)
self.assertTrue(hasattr(chart, "get_js_template"))
self.assertRaises(GraphosException, chart.get_js_template)
self.assertTrue(hasattr(chart, "get_html_id"))
self.assertTrue(self.html_id, chart.get_html_id())
self.assertTrue(hasattr(chart, "as_html"))
self.assertRaises(GraphosException, chart.as_html)
def test_options(self):
"""
        Assert that options fall back to the default dictionary when no options are passed during initialization
"""
chart = base.BaseChart(data_source=self.data_source)
self.assertEqual(self.default_options, chart.get_options())
class TestFlotRenderer(TestCase):
""" Test Cases for the graphos.renderers.flot module"""
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.data_source = SimpleDataSource(data)
self.data = data
self.options = {"title": "Sales and Expences Graph"}
self.default_options = get_default_options()
self.series_1 = [(2004, 1000), (2005, 1170), (2006, 660), (2007, 1030)]
self.series_2 = [(2004, 400), (2005, 460), (2006, 1120), (2007, 540)]
self.html_template = 'graphos/flot/html.html'
self.js_template = 'graphos/flot/js.html'
self.data_source = SimpleDataSource(data)
self.data = data
self.html_id = 'base_chart'
self.header = data[0]
series_object_1 = {'data': [(2004, 1000),
(2005, 1170),
(2006, 660),
(2007, 1030)],
'label': 'Sales'}
series_object_2 = {'data': [(2004, 400),
(2005, 460),
(2006, 1120),
(2007, 540)],
'label': 'Expenses'}
self.series_objects = [series_object_1, series_object_2]
def test_base_flot_chart(self):
chart = flot.BaseFlotChart(data_source=self.data_source,
options=self.options)
empty_options_chart = flot.BaseFlotChart(data_source=self.data_source,
options={})
json_data = chart.get_serieses()
self.assertEqual([self.series_1, self.series_2], json_data)
self.assertEqual(self.html_template, chart.get_html_template())
self.assertEqual(self.js_template, chart.get_js_template())
default = get_default_options()
self.assertEqual(default,
empty_options_chart.get_options())
default.update(self.options)
self.assertEqual(default, chart.get_options())
self.assertEqual(json.dumps(default), chart.get_options_json())
self.assertEqual(self.series_objects, chart.get_series_objects())
self.assertEqual(json.dumps(self.series_objects),
chart.get_series_objects_json())
def test_line_chart(self):
chart = flot.LineChart(data_source=self.data_source,
options=self.options)
empty_options_chart = flot.LineChart(data_source=self.data_source,
options={})
json_data = chart.get_serieses()
self.assertEqual([self.series_1, self.series_2], json_data)
default = get_default_options("lines")
self.assertEqual(default,
empty_options_chart.get_options())
default.update(self.options)
self.assertEqual(default,
chart.get_options())
def test_bar_chart(self):
chart = flot.BarChart(data_source=self.data_source,
options=self.options)
empty_options_chart = flot.BarChart(data_source=self.data_source,
options={})
json_data = chart.get_serieses()
self.assertEqual([self.series_1, self.series_2], json_data)
default = get_default_options("bars")
self.assertEqual(default,
empty_options_chart.get_options())
default.update(self.options)
self.assertEqual(default,
chart.get_options())
def test_point_chart(self):
chart = flot.PointChart(data_source=self.data_source,
options=self.options)
empty_options_chart = flot.PointChart(data_source=self.data_source,
options={})
json_data = chart.get_serieses()
self.assertEqual([self.series_1, self.series_2], json_data)
default = get_default_options("points")
self.assertEqual(default,
empty_options_chart.get_options())
default.update(self.options)
self.assertEqual(default,
chart.get_options())
class TestGchartRenderer(TestCase):
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.data_source = SimpleDataSource(data)
self.data = data
def test_line_chart(self):
chart = gchart.LineChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("LineChart" in chart.as_html())
def test_column_chart(self):
chart = gchart.ColumnChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("ColumnChart" in chart.as_html())
def test_bar_chart(self):
chart = gchart.BarChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("BarChart" in chart.as_html())
def test_pie_chart(self):
chart = gchart.PieChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("PieChart" in chart.as_html())
def test_area_chart(self):
chart = gchart.AreaChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("AreaChart" in chart.as_html())
def test_candlestick_chart(self):
# TODO: Change tests. Candlestick probably expects data in a particular format.
# Assert that data sent to candlestick is in correct format, and test accordingly
chart = gchart.CandlestickChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("CandlestickChart" in chart.as_html())
def test_gauge_chart(self):
        # TODO: Change tests. Gauge probably expects data in a particular format.
        # Assert that data sent to gauge is in correct format, and test accordingly
chart = gchart.GaugeChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("Gauge" in chart.as_html())
class TestBaseHighcharts(TestCase):
chart_klass = highcharts.BaseHighCharts
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.data_source = SimpleDataSource(data)
self.categories = [2004, 2005, 2006, 2007]
self.x_axis_title = 'Year'
self.series = [{'name': 'Sales', 'data': [1000, 1170, 660, 1030]}, {'name': 'Expenses', 'data': [400, 460, 1120, 540]}]
def test_get_categories(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_categories(), self.categories)
self.assertEqual(chart.get_categories_json(), json.dumps(self.categories))
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_series(), self.series)
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red']})
series = [{'name': 'Sales', 'data': [1000, 1170, 660, 1030], 'color': 'red'}, {'name': 'Expenses', 'data': [400, 460, 1120, 540]}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red', 'blue']})
series = [{'name': 'Sales', 'data': [1000, 1170, 660, 1030], 'color': 'red'}, {'name': 'Expenses', 'data': [400, 460, 1120, 540], 'color': 'blue'}]
self.assertEqual(chart.get_series(), series)
def test_get_title(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_title(), {'text': 'Chart'})
chart = self.chart_klass(data_source=self.data_source, options={'title': 'Highcharts'})
self.assertEqual(chart.get_title(), {'text': 'Highcharts'})
chart = self.chart_klass(data_source=self.data_source, options={'title': {'text': 'Highcharts', 'align': 'center'}})
self.assertEqual(chart.get_title(), {'text': 'Highcharts', 'align': 'center'})
def test_get_subtitle(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_subtitle(), {})
chart = self.chart_klass(data_source=self.data_source, options={'subtitle': 'Highcharts'})
self.assertEqual(chart.get_subtitle(), {'text': 'Highcharts'})
chart = self.chart_klass(data_source=self.data_source, options={'subtitle': {'text': 'Highcharts', 'align': 'center'}})
self.assertEqual(chart.get_subtitle(), {'text': 'Highcharts', 'align': 'center'})
def test_get_xaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_x_axis(), {'categories':self.categories, 'title': {'text': self.x_axis_title}})
chart = self.chart_klass(data_source=self.data_source, options={'xAxis': {'type': 'logarithmic', 'title': {'text': 'Sales vs Year'}}})
self.assertEqual(chart.get_x_axis(), {'categories':self.categories, 'title': {'text': 'Sales vs Year'}, 'type': 'logarithmic'})
def test_get_yaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_y_axis(), {'title': {'text': 'Values'}})
def test_get_yaxis_single_series(self):
single_data = [
['Year', 'Sales'],
[2004, 1000],
[2005, 1170],
[2006, 660],
[2007, 1030]
]
chart = self.chart_klass(data_source=SimpleDataSource(single_data))
self.assertEqual(chart.get_y_axis(), {'title': {'text': 'Sales'}})
def test_get_tooltip(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_tooltip(), {})
def test_get_credits(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_credits(), {})
def test_get_exporting(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_exporting(), {})
def test_get_legend(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_legend(), {})
def test_get_navigation(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_navigation(), {})
class TestHighchartsLineChart(TestBaseHighcharts):
chart_klass = highcharts.LineChart
def test_line_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'line'})
self.assertNotEqual(chart.as_html(), "")
class TestHighchartsBarChart(TestBaseHighcharts):
chart_klass = highcharts.BarChart
def test_bar_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'bar'})
self.assertNotEqual(chart.as_html(), "")
class TestHighchartsColumnChart(TestBaseHighcharts):
chart_klass = highcharts.ColumnChart
def test_column_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'column'})
self.assertNotEqual(chart.as_html(), "")
class TestHighchartsAreaChart(TestBaseHighcharts):
chart_klass = highcharts.AreaChart
def test_area_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'area'})
self.assertNotEqual(chart.as_html(), "")
class TestHighchartsPieChart(TestBaseHighcharts):
chart_klass = highcharts.PieChart
def test_pie_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'pie'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
series = [
{'name': "Sales", "data": [{"name": 2004, "y": 1000}, {'name': 2005, 'y': 1170}, {'name': 2006, 'y': 660},
{'name': 2007, 'y': 1030}]},
{'name': 'Expenses', 'data': [{"name": 2004, "y": 400}, {'name': 2005, 'y': 460}, {'name': 2006, 'y': 1120},
{'name': 2007, 'y': 540}]}
]
self.assertEqual(chart.get_series(), series)
# This function should be modified when color ability is added to Piechart.
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red']})
series = [
{'name': "Sales", "data": [{"name": 2004, "y": 1000}, {'name': 2005, 'y': 1170}, {'name': 2006, 'y': 660},
{'name': 2007, 'y': 1030}]},
{'name': 'Expenses', 'data': [{"name": 2004, "y": 400}, {'name': 2005, 'y': 460}, {'name': 2006, 'y': 1120},
{'name': 2007, 'y': 540}]}
]
self.assertEqual(chart.get_series(), series)
class TestHighchartsScatterChart(TestBaseHighcharts):
chart_klass = highcharts.ScatterChart
multiseriesdata = [
['State', 'Country', 'Rainfall', 'Precipitation'],
['Uttar Pradesh', 'India', 1, 2],
['Bihar', 'India', 2, 3],
['Telangana', 'India', 5, 7],
['Lahore', 'Pakistan', 9, 8],
['Hyderabad', 'Pakistan', 8, 7],
['Lahore', 'Pakistan', 3, 11]
]
def test_scatter_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'scatter'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
series = [{'data': [{'Year': 2004, 'x': 1000, 'y': 400}, {'Year': 2005, 'x': 1170, 'y': 460},
{'Year': 2006, 'x': 660, 'y': 1120}, {'Year': 2007, 'x': 1030, 'y': 540}],
'name': 'Year'}]
self.assertEqual(chart.get_series(), series)
# Scatter Chart has ability to work with multiseries data.
chart = self.chart_klass(data_source=SimpleDataSource(self.multiseriesdata))
series = [{"data": [{"y": 8, "x": 9, "State": "Lahore"}, {"y": 7, "x": 8, "State": "Hyderabad"}, {"y": 11, "x": 3, "State": "Lahore"}], "name": "Pakistan"}, {"data": [{"y": 2, "x": 1, "State": "Uttar Pradesh"}, {"y": 3, "x": 2, "State": "Bihar"}, {"y": 7, "x": 5, "State": "Telangana"}], "name": "India"}]
self.assertEqual(chart.get_series(), series)
# This function should be modified when color ability is added to Scatter.
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red']})
pass
def test_get_xaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_x_axis(),{'title': {'text': 'Sales'}})
def test_get_yaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_y_axis(), {'title': {'text': 'Expenses'}})
def test_get_yaxis_single_series(self):
pass
class TestHighchartsColumnLineChart(TestBaseHighcharts):
chart_klass = highcharts.ColumnLineChart
def test_line_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'column_line'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
series = [{'type': 'column', 'data': [1000, 1170, 660, 1030], 'name': 'Sales'},{'data': [400, 460, 1120, 540], 'name': 'Expenses', 'type': 'line'}]
self.assertEqual(chart.get_series(), series)
# This function should be modified when color ability is added to ColumnLine.
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red']})
series = [{'type': 'column', 'data': [1000, 1170, 660, 1030], 'name': 'Sales'},{'data': [400, 460, 1120, 540], 'name': 'Expenses', 'type': 'line'}]
self.assertEqual(chart.get_series(), series)
class TestHighchartsLineColumnChart(TestBaseHighcharts):
chart_klass = highcharts.LineColumnChart
def test_line_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'line_column'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
series = [{'type': 'line', 'data': [1000, 1170, 660, 1030], 'name': 'Sales'},{'data': [400, 460, 1120, 540], 'name': 'Expenses', 'type': 'column'}]
self.assertEqual(chart.get_series(), series)
    # This function should be modified when color ability is added to LineColumn.
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=self.data_source, options={'colors': ['red']})
series = [{'type': 'line', 'data': [1000, 1170, 660, 1030], 'name': 'Sales'},{'data': [400, 460, 1120, 540], 'name': 'Expenses', 'type': 'column'}]
self.assertEqual(chart.get_series(), series)
class TestHighchartsFunnel(TestBaseHighcharts):
chart_klass = highcharts.Funnel
funnel_data = [['Unique users', 'Counts'],
['Website visits', 654],
['Downloads', 4064],
['Requested price list', 1987],
['Invoice sent', 976],
['Finalized', 846]
]
def test_funnel_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'funnel'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.funnel_data))
series = [{'data': [['Website visits', 654], ['Downloads', 4064], ['Requested price list', 1987],
['Invoice sent', 976], ['Finalized', 846]]}]
self.assertEqual(chart.get_series(), series)
# Needs to be modified when color functionality is added to Funnel
def test_get_series_with_colors(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.funnel_data))
series = [{'data': [['Website visits', 654],['Downloads', 4064],['Requested price list', 1987],['Invoice sent', 976],['Finalized', 846]]}]
self.assertEqual(chart.get_series(), series)
class TestHighchartsBubbleChart(TestBaseHighcharts):
chart_klass = highcharts.Bubble
def setUp(self):
data = [["Country", "Sugar Consumption", "Fat Consumption", "GDP"],
["India", 10, 15, 90],
["USA", 11, 20, 19],
["Srilanka", 15, 5, 98],
["Indonesia", 16, 35, 150]]
self.data_source = SimpleDataSource(data)
def test_bubble_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'bubble'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
bubble_chart_data_multi = [["Grade", "Country", "Sugar Consumption", "Fat Consumption", "GDP"],
["A", "India", 10, 15, 90],
["B", "India", 11, 20, 19],
["P", "USA", 39, 21, 100],
["O", "USA", 44, 29, 150]]
chart = self.chart_klass(data_source=self.data_source)
series = [{'data': [{'y': 15, 'Country': 'India', 'z': 90, 'x': 10}, {'y': 20, 'Country': 'USA', 'z': 19, 'x': 11}, {'y': 5, 'Country': 'Srilanka', 'z': 98, 'x': 15}, {'y': 35, 'Country': 'Indonesia', 'z': 150, 'x': 16}], 'name': 'Country'}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=SimpleDataSource(bubble_chart_data_multi))
series = [{'data': [{'Grade': 'A', 'x': 10, 'z': 90, 'y': 15}, {'Grade': 'B', 'x': 11, 'z': 19, 'y': 20}], 'name': 'India'}, {'data': [{'Grade': 'P', 'x': 39, 'z': 100, 'y': 21}, {'Grade': 'O', 'x': 44, 'z': 150, 'y': 29}], 'name': 'USA'}]
self.assertEqual(chart.get_series(), series)
# Needs to be modified when color functionality is added to Bubble
def test_get_series_with_colors(self):
pass
def test_get_yaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_y_axis(), {'title': {'text': 'Fat Consumption'}})
def test_get_xaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_x_axis(), {'title': {'text': 'Sugar Consumption'}})
    # Bubble chart does not have a categories attribute.
def test_get_categories(self):
pass
    # Bubble chart does not have the single-series y-axis case.
def test_get_yaxis_single_series(self):
pass
class TestHighchartsHeatMap(TestBaseHighcharts):
chart_klass = highcharts.HeatMap
def test_heatmap_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'type': 'heatmap'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=self.data_source)
series = [{'data': [[0, 0, 1000], [0, 1, 400], [1, 0, 1170], [1, 1, 460], [2, 0, 660], [2, 1, 1120], [3, 0, 1030], [3, 1, 540]]}]
self.assertEqual(chart.get_series(), series)
def test_get_yaxis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_y_axis(), {'categories': ['Sales', 'Expenses']})
# This function should be modified when color ability is added to Heatmap.
def test_get_series_with_colors(self):
pass
def test_get_color_axis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_color_axis(), {})
def test_get_yaxis_single_series(self):
pass
class TestHighchartsTreeMap(TestBaseHighcharts):
chart_klass = highcharts.TreeMap
treemap_data = [["Country", "Cause", "Death Rate"],
["India", "Cardiovascular Disease", 10],
["India", "Road Accident", 5],
["China", "Cardiovascular Disease", 9],
["China", "Road Accident", 6],
]
def test_treemap_chart(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.treemap_data))
self.assertEqual(chart.get_chart(), {'type': 'treemap'})
self.assertNotEqual(chart.as_html(), "")
def test_get_series(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.treemap_data))
series = [{'data': [{'color': '#7cb5ec', 'value': 6, 'id': 'id_00', 'parent': 'id_0', 'name': 'Road Accident'}, {'color': '#7cb5ec', 'value': 9, 'id': 'id_01', 'parent': 'id_0', 'name': 'Cardiovascular Disease'}, {'color': '#7cb5ec', 'id': 'id_0', 'value': 15, 'name': 'China'}, {'color': '#434348', 'value': 5, 'id': 'id_12', 'parent': 'id_1', 'name': 'Road Accident'}, {'color': '#434348', 'value': 10, 'id': 'id_13', 'parent': 'id_1', 'name': 'Cardiovascular Disease'}, {'color': '#434348', 'id': 'id_1', 'value': 15, 'name': 'India'}]}]
self.assertEqual(chart.get_series(), series)
    # Modify after color functionality is added to TreeMap
def test_get_series_with_colors(self):
pass
class TestHighchartsPieDonut(TestHighchartsPieChart):
chart_klass = highcharts.PieDonut
pie_data = [["Country", "Cause", "Death Rate"],
["India", "Cardiovascular Disease", 10],
["India", "Road Accident", 5],
["China", "Cardiovascular Disease", 9],
["China", "Road Accident", 6]]
def test_get_series(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.pie_data))
series = [{'showInLegend': True, 'dataLabels': {'enabled': False}, 'data': [{'color': '#7cb5ec', 'y': 15, 'name': 'China'}, {'color': '#434348', 'y': 15, 'name': 'India'}], 'name': 'Country', 'size': '60%'}, {'innerSize': '60%', 'data': [{'color': '#7cb5ec', 'y': 6, 'name': 'Road Accident'}, {'color': '#7cb5ec', 'y': 9, 'name': 'Cardiovascular Disease'}, {'color': '#434348', 'y': 5, 'name': 'Road Accident'}, {'color': '#434348', 'y': 10, 'name': 'Cardiovascular Disease'}], 'name': 'Cause', 'size': '80%'}]
self.assertEqual(chart.get_series(), series)
# To be modified once color functionality is added to Chart.
def test_get_series_with_colors(self):
pass
class TestHighchartsHighMap(TestBaseHighcharts):
chart_klass = highcharts.HighMap
map_data_us_multi_series_lat_lon = [
['Latitude', 'Longitude', 'Winner', 'Seats'],
[32.380120, -86.300629, 'Trump', 10],
[58.299740, -134.406794, 'Trump', 10]]
map_data_us_multi_series = [
['State', 'Winner', 'Seats'],
['us-nj', 'Trump', 10],
['us-ri', 'Trump', 10]]
map_data_us_lat_lon = [
['Latitude', 'Longitude', 'Population'],
[32.380120, -86.300629, 900],
[58.299740, -134.406794, 387],
[33.448260, -112.075774, 313],
]
map_data_us = [
['State', 'Population'],
['us-nj', 438],
['us-ri', 387]]
map_data_us_point = [
['Lat', 'Lon', 'Name', 'Date'],
[46.8797, -110.3626, 'trump', '25th February'],
[41.4925, -99.9018, 'trump', '26th February'],
[45.4925, -89.9018, 'trump', '27th February']]
def test_highmap_chart(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us))
self.assertEqual(chart.get_chart(), {'type': 'map'})
self.assertNotEqual(chart.as_html(), "")
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us_lat_lon))
self.assertEqual(chart.get_chart(), {'type': 'mapbubble'})
def test_get_series(self):
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us))
series = [{'joinBy': ['hc-key', 'code'], 'data': [{'code': 'us-nj', 'value': 438}, {'code': 'us-ri', 'value': 387}], 'name': 'Population'}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us_point))
series = [{'color': 'black', 'type': 'map', 'name': 'Regions', 'showInLegend': False}, {'joinBy': ['hc-key', 'code'], 'data': [{'lat': 46.8797, 'Date': '25th February', 'lon': -110.3626}, {'lat': 41.4925, 'Date': '26th February', 'lon': -99.9018}, {'lat': 45.4925, 'Date': '27th February', 'lon': -89.9018}], 'name': 'trump'}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us_lat_lon))
series = [{'type': 'map', 'name': 'Basemap', 'showInLegend': False}, {'joinBy': ['hc-key', 'code'], 'data': [{'lat': 32.38012, 'z': 900, 'lon': -86.300629}, {'lat': 58.29974, 'z': 387, 'lon': -134.406794}, {'lat': 33.44826, 'z': 313, 'lon': -112.075774}], 'name': 'Population'}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us_multi_series))
series = [{'joinBy': ['hc-key', 'code'], 'data': [{'code': 'us-nj', 'Seats': 10}, {'code': 'us-ri', 'Seats': 10}], 'name': 'Trump'}]
self.assertEqual(chart.get_series(), series)
chart = self.chart_klass(data_source=SimpleDataSource(self.map_data_us_multi_series_lat_lon))
series = [{'color': 'black', 'type': 'map', 'name': 'Regions', 'showInLegend': False}, {'joinBy': ['hc-key', 'code'], 'data': [{'lat': 32.38012, 'lon': -86.300629, 'Seats': 10}, {'lat': 58.29974, 'lon': -134.406794, 'Seats': 10}], 'name': 'Trump'}]
self.assertEqual(chart.get_series(), series)
# Needs some modification
def test_get_series_with_colors(self):
pass
def test_get_color_axis(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_color_axis(), {})
    # What should be done for this? Should it be kept or removed?
def test_get_map(self):
pass
def test_get_yaxis_single_series(self):
pass
class TestHighchartsDonutChart(TestHighchartsPieChart):
chart_klass = highcharts.DonutChart
def test_pie_chart(self):
chart = self.chart_klass(data_source=self.data_source)
self.assertEqual(chart.get_chart(), {'options3d': {'alpha': 45, 'enabled': True}, 'type': 'pie'})
self.assertNotEqual(chart.as_html(), "")
class TestYUIRenderer(TestCase):
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.data_source = SimpleDataSource(data)
self.data = data
def test_line_chart(self):
chart = yui.LineChart(data_source=self.data_source)
self.assertNotEqual(chart.as_html(), "")
self.assertTrue("line" in chart.as_html())
class TestMatplotlibRenderer(TestCase):
def setUp(self):
data = [['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
['2004', 1000, 400, 100, 600],
['2005', 1170, 460, 120, 310],
['2006', 660, 1120, 50, -460],
['2007', 1030, 540, 100, 200]]
self.data_source = SimpleDataSource(data)
self.data = data
def test_line_chart(self):
chart = matplotlib_renderer.LineChart(self.data_source)
self.assertNotEqual(chart.as_html(), "")
def test_bar_chart(self):
chart = matplotlib_renderer.BarChart(self.data_source)
self.assertNotEqual(chart.as_html(), "")
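# Illustrative sketch (not a test) of how the pieces exercised above fit
# together in application code; the data values and chart title are
# arbitrary.
def _example_line_chart_html():
    data = [['Year', 'Sales'], [2004, 1000], [2005, 1170]]
    chart = gchart.LineChart(data_source=SimpleDataSource(data),
                             options={'title': 'Sales'})
    # The returned markup can be dropped straight into a Django template.
    return chart.as_html()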
| bsd-2-clause |
JosmanPS/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
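# Illustrative check (assuming the 0/1 class encoding used above): the
# predicted label is simply the sign of the decision score.
labels_from_scores = (bdt.decision_function(X) > 0).astype(int)
# labels_from_scores agrees element-wise with bdt.predict(X).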
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
joshbohde/scikit-learn | sklearn/datasets/tests/test_mldata.py | 2 | 5248 | """Test functionality of mldata fetching utilities."""
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import (assert_in, mock_urllib2)
from nose.tools import assert_equal, assert_raises
from nose import with_setup
from numpy.testing import assert_array_equal
import os
import shutil
import tempfile
import scipy as sp
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urllib2_ref = datasets.mldata.urllib2
datasets.mldata.urllib2 = mock_urllib2({'mock':
{'label': sp.ones((150,)),
'data': sp.ones((150, 4))}})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
assert_in(mock, in_=['COL_NAMES', 'DESCR', 'target', 'data'])
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.urllib2.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urllib2 = _urllib2_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urllib2_ref = datasets.mldata.urllib2
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urllib2 = mock_urllib2({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
assert_in(dset, in_=['COL_NAMES', 'DESCR', 'data'], out_=['target'])
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urllib2 = _urllib2_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urllib2_ref = datasets.mldata.urllib2
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'label': y,
'data': x,
'z': z},
['z', 'data', 'label'])})
dset = fetch_mldata(dataname, data_home=tmpdir)
assert_in(dset, in_=['COL_NAMES', 'DESCR', 'target', 'data', 'z'],
out_=['x', 'y'])
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'y': y,
'x': x,
'z': z},
['y', 'x', 'z'])})
dset = fetch_mldata(dataname, data_home=tmpdir)
assert_in(dset, in_=['COL_NAMES', 'DESCR', 'target', 'data', 'z'],
out_=['x', 'y'])
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urllib2 = mock_urllib2({dataname:
({'y': y,
'x': x,
'z': z},
['z', 'x', 'y'])})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
assert_in(dset, in_=['COL_NAMES', 'DESCR', 'target', 'data', 'x'],
out_=['z', 'y'])
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
assert_in(dset, in_=['COL_NAMES', 'DESCR', 'target', 'data', 'x'],
out_=['z', 'y'])
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
finally:
datasets.mldata.urllib2 = _urllib2_ref
| bsd-3-clause |
dgwakeman/mne-python | mne/parallel.py | 13 | 5045 | """Parallel util function
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
from .externals.six import string_types
import inspect
import logging
import os
from . import get_config
from .utils import logger, verbose
if 'MNE_FORCE_SERIAL' in os.environ:
_force_serial = True
else:
_force_serial = None
@verbose
def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
"""Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func: callable
A function
n_jobs: int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
INFO or DEBUG will print parallel status, others will not.
max_nbytes : int, str, or None
Threshold on the minimum size of arrays passed to the workers that
triggers automated memory mapping. Can be an int in bytes,
or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays. Use 'auto' to
use the value set using mne.set_memmap_min_size.
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object
my_func: callable
func if not parallel or delayed(func)
n_jobs: int
Number of jobs >= 0
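Examples
--------
A minimal usage sketch (``my_fun`` is a placeholder for any picklable
function)::
    parallel, p_fun, n_jobs = parallel_func(my_fun, n_jobs=2)
    out = parallel(p_fun(x) for x in range(10))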
"""
# for a single job, we don't need joblib
if n_jobs == 1:
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
logger.warning('joblib not installed. Cannot run in parallel.')
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
# check if joblib is recent enough to support memmapping
p_args = inspect.getargspec(Parallel.__init__).args
joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
cache_dir = get_config('MNE_CACHE_DIR', None)
if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':
max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
if max_nbytes is not None:
if not joblib_mmap and cache_dir is not None:
logger.warning('"MNE_CACHE_DIR" is set but a newer version of '
'joblib is needed to use the memmapping pool.')
if joblib_mmap and cache_dir is None:
logger.info('joblib supports memmapping pool but "MNE_CACHE_DIR" '
'is not set in MNE-Python config. To enable it, use, '
'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
'store temporary files under /dev/shm and can result '
'in large memory savings.')
# create keyword arguments for Parallel
kwargs = {'verbose': 5 if logger.level <= logging.INFO else 0}
if joblib_mmap:
if cache_dir is None:
max_nbytes = None # disable memmapping
kwargs['temp_folder'] = cache_dir
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
parallel = Parallel(n_jobs, **kwargs)
my_func = delayed(func)
return parallel, my_func, n_jobs
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values
Parameters
----------
n_jobs : int
The number of jobs.
allow_cuda : bool
Allow n_jobs to be 'cuda'. Default: False.
Returns
-------
n_jobs : int
The checked number of jobs. Always positive (or 'cuda' if
applicable.)
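Examples
--------
Illustrative only; actual values depend on the machine and assume
MNE_FORCE_SERIAL is not set::
    check_n_jobs(1)   # -> 1
    check_n_jobs(-1)  # -> number of available CPU cores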
"""
if not isinstance(n_jobs, int):
if not allow_cuda:
raise ValueError('n_jobs must be an integer')
elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':
raise ValueError('n_jobs must be an integer, or "cuda"')
# else, we have n_jobs='cuda' and this is okay, so do nothing
elif _force_serial:
n_jobs = 1
logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
'serial mode.')
elif n_jobs <= 0:
try:
import multiprocessing
n_cores = multiprocessing.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
if n_jobs <= 0:
raise ValueError('If n_jobs has a negative value it must not '
'be less than the number of CPUs present. '
'You\'ve got %s CPUs' % n_cores)
except ImportError:
# only warn if they tried to use something other than 1 job
if n_jobs != 1:
logger.warning('multiprocessing not installed. Cannot run in '
'parallel.')
n_jobs = 1
return n_jobs
| bsd-3-clause |
kazuto1011/rcnn-server | tms_ss_ssd/nodes/ssd_pascalvoc.py | 1 | 6252 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
if 'SSD_ROOT' not in os.environ:
print "Could not find 'SSD_ROOT'."
sys.exit(1)
SSD_ROOT = os.environ['SSD_ROOT']
print "SDD=" + SSD_ROOT
sys.path.insert(0, os.path.join(SSD_ROOT, 'python'))
import caffe
import cv2
import argparse
import rospy as rp
import tms_ss_ssd.srv as srv
import tms_ss_ssd.msg as msg
from google.protobuf import text_format
from caffe.proto import caffe_pb2
# load PASCAL VOC labels
labelmap_file = os.path.join(SSD_ROOT, 'data/VOC0712/labelmap_voc.prototxt')
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
def parse_args():
parser = argparse.ArgumentParser(description='Single Shot Detector demo')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
# parser.add_argument('--net', dest='demo_net',
# help='Network to use [vgg16]',
# choices=NETS.keys(), default='vgg16')
parser.add_argument('--conf', dest='conf_thresh',
default=0.6, type=float)
parser.add_argument('--nms', dest='nms_thresh',
default=0.3, type=float)
return parser.parse_args()
def load_net(args):
model_def = os.path.join(SSD_ROOT, 'models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt')
model_weights = os.path.join(SSD_ROOT, 'models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_60000.caffemodel')
return caffe.Net(model_def, model_weights, caffe.TEST)
class SSD:
def __init__(self, name, args):
self._args = args
self._name = name
self._init = False
rp.loginfo("Ready to start")
self._server = rp.Service(self._name, srv.obj_detection, self._callback)
def _callback(self, req):
rp.loginfo("Received an image")
if self._init is False:
self._net = load_net(self._args)
self.transformer = caffe.io.Transformer({'data': self._net.blobs['data'].data.shape})
self.transformer.set_transpose('data', (2, 0, 1))
self.transformer.set_mean('data', np.array([104,117,123]))
self.transformer.set_raw_scale('data', 255)
self.transformer.set_channel_swap('data', (2,1,0))
self._init = True
if self._args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(self._args.gpu_id)
# convert rosmsg to cv image
np_array = np.fromstring(req.image.data, np.uint8)
image = cv2.imdecode(np_array, cv2.CV_LOAD_IMAGE_COLOR)
objects = self._detect(image)
return srv.obj_detectionResponse(objects)
def _detect(self, image):
# set net to batch size of 1
image_resize = 300
self._net.blobs['data'].reshape(1, 3, image_resize, image_resize)
image = np.asarray(image, np.float32)
image /= 255
transformed_image = self.transformer.preprocess('data', image)
self._net.blobs['data'].data[...] = transformed_image
# Forward pass.
detections = self._net.forward()['detection_out']
# Parse the outputs.
det_label = detections[0,0,:,1]
det_conf = detections[0,0,:,2]
det_xmin = detections[0,0,:,3]
det_ymin = detections[0,0,:,4]
det_xmax = detections[0,0,:,5]
det_ymax = detections[0,0,:,6]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= self._args.conf_thresh]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
currentAxis = plt.gca()
obj_list = []
for i in xrange(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * image.shape[1]))
ymin = int(round(top_ymin[i] * image.shape[0]))
xmax = int(round(top_xmax[i] * image.shape[1]))
ymax = int(round(top_ymax[i] * image.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = top_labels[i]
display_txt = '%s: %.2f'%(label_name, score)
coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
color = colors[label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
obj = msg.object()
obj.class_name = label_name
obj.score = top_conf[i]
obj.region.x_offset = xmin
obj.region.y_offset = ymin
obj.region.width = xmax-xmin+1
obj.region.height = ymax-ymin+1
obj.region.do_rectify = False
obj_list.append(obj)
return obj_list
class NodeMain:
def __init__(self):
rp.init_node('ssd', anonymous=False)
rp.on_shutdown(self.shutdown)
args = parse_args()
node = SSD('ssd', args)
rp.spin()
@staticmethod
def shutdown():
rp.loginfo("Shutting down")
if __name__ == '__main__':
try:
NodeMain()
except rp.ROSInterruptException:
rp.loginfo("Terminated")
| mit |
danmackinlay/branching_process | branching_process/datasets/youtube_sornette/fileio.py | 1 | 13901 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from time import time
from pathlib import Path
import os, os.path
import numpy as np
from numpy import random
# from excited_util import to_rng, uuid_source
from zlib import crc32
import pickle
import warnings
from .test_sets import MEXICAN_SINGER
pd.set_option('io.hdf.default_format', 'table')
inf = float("inf")
class RawYoutube:
def __init__(self, base_path, *args, **kwargs):
self.base_path = Path(base_path)
self.raw_path = self.base_path / 'raw.h5'
def get_raw_store(self, mode="r"):
return get_store(str(self.raw_path), mode=mode)
def get_raw_series(self, video_id=MEXICAN_SINGER):
with self.get_raw_store(mode="r") as store:
query = "video_id=={!r}".format(video_id)
return (
store.select("tseries", query),
store.select("video_meta", query)
)
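# Hypothetical usage sketch (the base path below is a placeholder):
# raw = RawYoutube('/data/youtube_sornette')
# tseries, video_meta = raw.get_raw_series(MEXICAN_SINGER)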
#
# def shardhash_from_filename(filename, nc=2):
# return filename[2:2+nc]
# def filename_from_shardhash(shard):
# return "G_" + shard + ".h5"
# def is_valid_shard_filename(filename, nc=2):
# return filename.startswith("G_") and filename.endswith(".h5") and len(filename)==(nc+5)
#
# def gen_shardhash(video_id, nc=2):
# """convert video_id to shardgroup hash by taking two chars of hex digest.
# crc32 is faster than md5 by a factor of 10^3,
# which is significant with this many rows.
# (adler32 was even faster but wasn't distributing the series evenly.)
# """
# return hex(crc32(video_id) & 0xffffffff)[-nc:]
#
#
def get_store(fn, mode="r"):
"""
Open an HDF5 file by absolute path without messing around with trying
to find the right shard.
Set sensible compression etc.
"""
#If we chose "write" then nuke what was already there.
#this didn't seem to work for a while otherwise
basedir = os.path.dirname(fn)
if not os.path.exists(basedir):
os.makedirs(basedir)
if mode=="w" and os.path.exists(fn):
warnings.warn("deleting %s" % fn)
os.unlink(fn)
return pd.HDFStore(
fn,
mode=mode,
complevel=5,
complib="blosc",
chunksize=2<<18
)
#
# def get_store_by_path_chunks(fn_chunks, mode="r", **kwargs):
# """
# Open an HDF5 file by path chunks, because i am sick of typing
# os.path.join
# """
# fn = os.path.join(*fn_chunks)
# return get_store(fn, mode=mode, **kwargs)
#
# def get_store_2(pdir=DATA_DIR_ANALYSIS,
# fn="def",
# mode="r",
# **kwargs):
# """
# Open an HDF5 file by path chunks, because i am sick of typing
# os.path.join
# """
# return get_store_by_path_chunks([pdir, fn + ".h5"], mode=mode, **kwargs)
#
# def get_result_2(pdir=DATA_DIR_ANALYSIS,
# fn="def",
# hd5path="res",
# columns=None,
# **kwargs):
# """
# Open an HDF5 file by path chunks, because i am sick of typing
# os.path.join
# """
# with get_store_2(pdir, fn, mode="r", **kwargs) as st:
# return st.select(hd5path, columns=columns)
#
# def get_shard(basedir=DATA_DIR_COOKED, shard="00", mode="r"):
# """
# Open an HDF5 file by shard path
# """
# fn = os.path.join(basedir, filename_from_shardhash(shard))
# return get_store_by_path_chunks(
# [basedir, filename_from_shardhash(shard)], mode=mode
# )
#
# def get_all_shards(basedir=DATA_DIR_COOKED, include_store=True, nc=2):
# for filename in os.listdir(basedir):
# if not is_valid_shard_filename(filename, nc): continue
# shardhash = shardhash_from_filename(filename, nc)
# if include_store:
# yield shardhash, get_shard(basedir, shardhash, mode="r")
# else:
# yield shardhash
#
# def get_shard_by_vid(
# basedir=DATA_DIR_COOKED,
# video_id=MEXICAN_SINGER,
# mode="r", nc=2):
# """
# Open an HDF5 file by shard path
# """
# shard = gen_shardhash(video_id, nc)
# return get_store_by_path_chunks(
# [basedir, filename_from_shardhash(shard)], mode=mode
# )
#
# def get_one_series(basedir=DATA_DIR_COOKED, video_id=MEXICAN_SINGER,
# transform_fn=None, nc=2):
# """
# >>> ts, vm = fileio.get_one_series(_settings.DATA_DIR_CHOPPED, "-2IXE5DcWzg")
# """
# shard = gen_shardhash(video_id, nc)
# if transform_fn is None:
# transform_fn = lambda t, m: (t, m)
# with get_shard(basedir, shard) as store:
# query = "video_id=={!r}".format(video_id)
# return transform_fn(
# store.select("tseries", query),
# store.select("video_meta", query))
#
# def iter_series_from_shard(store, transform_fn=None, limit=inf, nc=2, **kwargs):
# i = 0
# if transform_fn is None:
# transform_fn = lambda t, m, **kwargs: (t, m)
# for vid, tseries in store['tseries'].groupby(["video_id"], sort=False):
# query = "video_id=={!r}".format(vid)
# video_meta = store.select("video_meta", query).iloc[0]
# result = transform_fn(tseries, video_meta, **kwargs)
# if result is not None:
# yield result
# i +=1
# if i >= limit:
# raise StopIteration
#
# def iter_series_from_dir(basedir=DATA_DIR_COOKED, transform_fn=None, limit=inf, nc=2, **kwargs):
# i = 0
# if transform_fn is None:
# transform_fn = lambda t, m: (t, m)
# for shardhash, store in get_all_shards(basedir, include_store=True):
# with store:
# for result in iter_series_from_shard(
# store, transform_fn=transform_fn, nc=nc, **kwargs):
# yield result
# i +=1
# if i >= limit:
# raise StopIteration
#
# def get_concatenated_shards_table(
# basedir=DATA_DIR_COOKED,
# hd5path="video_meta",
# columns=None,
# nc=2,
# query=None,
# ignore_index=True):
# """return an in-memory table synthesized from the pieces"""
# shardlist = []
# for item in get_all_shards(basedir, include_store=True, nc=nc):
# with item[1] as shard:
# shardlist.append(shard.select(hd5path, query, columns=columns))
# return pd.concat(shardlist, ignore_index=ignore_index)
#
# def get_all_video_ids():
# """return an array of all video ids using cached index"""
# return get_concatenated_shards_table(DATA_DIR_IDX,
# hd5path="idx",
# columns=["video_id"])
#
# def find_in_index(vid=MEXICAN_SINGER, columns=None, with_text=False):
# columns = [
# u'id_hash',
# u'n_samples',
# u'start_time',
# u'end_time',
# u'count_inc_mean',
# u'count_inc_std',
# u'time_inc_mean',
# u'time_inc_std',
# u'rate_mean',
# u'rate_std',
# u'time_span',
# u'start_count',
# u'count_span',
# u'c05_sample',
# u'c10_sample',
# u'c25_sample',
# u'c50_sample',
# u'c75_sample',
# u'c90_sample',
# u'c95_sample',
# u'upload_time',
# u'length'
# ]
#
# if with_text:
# columns.extend([u'author', u'title', u'channel'])
#
# with get_shard_by_vid(basedir=DATA_DIR_IDX, video_id=vid) as store:
# return store.select('idx',"index==vid & columns=columns").iloc[0,:]
#
# def get_index_table(columns=None, with_text=False,query=None):
# """return an in-memory table synthesized from the pieces"""
# if columns is None:
# columns = [
# u'id_hash',
# u'n_samples',
# u'start_time',
# u'end_time',
# u'count_inc_mean',
# u'count_inc_std',
# u'time_inc_mean',
# u'time_inc_std',
# u'rate_mean',
# u'rate_std',
# u'time_span',
# u'start_count',
# u'count_span',
# u'c05_sample',
# u'c10_sample',
# u'c25_sample',
# u'c50_sample',
# u'c75_sample',
# u'c90_sample',
# u'c95_sample',
# u'upload_time',
# u'length'
# ]
# if with_text:
# columns.extend([u'author', u'title', u'channel'])
#
# idx = get_concatenated_shards_table(
# DATA_DIR_IDX, "idx",
# columns=columns,
# ignore_index=False,
# query=query)
# # now stored in the index, but persists somehow as a column
# #idx.drop(["video_id"],inplace=True,axis=1)
# return idx
#
# def get_description_table(columns=None):
# """return an in-memory table synthesized from the pieces"""
# return get_result_2(
# DATA_DIR_ANALYSIS, fn="description_idx", columns=columns)
#
# def map_shards(
# pool=None,
# base_dir=DATA_DIR_CHOPPED,
# process_fn=None, nc=2,
# *args, **kwargs):
# """
# run the given store-level function on all the stores in a dir.
# """
# if pool is None:
# from parallel import dummy_pool
# pool = dummy_pool
# shard_list = get_all_shards(base_dir, include_store=False, nc=nc)
# pool.map(process_fn, shard_list, *args, **kwargs)
# return pool
#
# def reduce_shards(
# base_dir=DATA_DIR_CHOPPED,
# reduce_fn=None, nc=2,
# *args, **kwargs):
# """
# put the shards back together
# """
# if reduce_fn is None:
# reduce_fn = lambda x: x
# shard_list = get_all_shards(base_dir, include_store=False, nc=nc)
# return reduce_fn(shard_list, *args, **kwargs)
#
# def reduce_pool(
# map_pool_key,
# reduce_fn=None,
# *args, **kwargs):
# """
# run the given store-level function on all the stores in a dir.
# """
# if reduce_fn is None:
# reduce_fn = lambda x: x
# from parallel import TrackingPool
# pool = TrackingPool(pool_key=map_pool_key)
# file_list = pool.all_files(abs_p=True)
# return reduce_fn(file_list, *args, **kwargs)
#
# def purge_pool(map_pool_key):
# reduce_pool(map_pool_key, lambda p: os.unlink(p))
#
# def get_concatenated_pool_table(
# map_pool_key,
# hd5path="video_meta",
# columns=None,
# base_dir=DATA_DIR_JOB_OUTPUT):
# """
# for playing concatenated pool
# """
# from parallel import TrackingPool
#
# pool = TrackingPool(pool_key=map_pool_key)
# file_list = pool.all_files(abs_p=True, base_dir=base_dir)
# table_list = []
# for filepath in file_list:
# with pd.HDFStore(filepath, mode="r") as store:
# try:
# table_list.append(store.select(hd5path, columns=columns))
# except KeyError, e:
# warnings.warn(
# "no matching object {hd5path!r} in {filepath!r}".format(
# hd5path=hd5path,
# filepath=filepath,
# )
# )
#
# return pd.concat(table_list, ignore_index=True)
#
# def store_concatenated_pool_table(
# map_pool_key,
# hd5path="res",
# out_hd5path=None,
# mode="a",
# base_dir_in=DATA_DIR_JOB_OUTPUT,
# base_dir_out=DATA_DIR_ANALYSIS,
# ):
# if out_hd5path is None:
# out_hd5path = hd5path
# c_table = get_concatenated_pool_table(
# map_pool_key, hd5path=hd5path,
# base_dir=base_dir_in)
# with get_store_by_path_chunks([
# base_dir_out,
# map_pool_key + ".h5"
# ], mode=mode) as st:
# st.append(out_hd5path, c_table,
# data_columns=True, #index all
# index=True, #We can index these because they are atomic
# )
# return c_table
#
# def store_concatenated_pool_table_on_disk(
# map_pool_key,
# hd5path="video_meta",
# out_hd5path=None,
# mode="a",
# base_dir_in=DATA_DIR_JOB_OUTPUT,
# base_dir_out=DATA_DIR_ANALYSIS,
# ):
# """low-memory version"""
# if out_hd5path is None:
# out_hd5path = hd5path
# pool = TrackingPool(pool_key=map_pool_key)
# file_list = pool.all_files(abs_p=True, base_dir=base_dir_in)
# with get_store_by_path_chunks([
# base_dir_out, map_pool_key + ".h5"], mode=mode) as write_store:
# for filepath in file_list:
# with pd.HDFStore(filepath, mode="r") as read_store:
# try:
# write_store.append(out_hd5path,
# read_store.select(hd5path, columns=columns),
# data_columns=True, #index all
# index=False,
# )
# except KeyError, e:
# warnings.warn(
# "no matching object {hd5path!r} in {filepath!r}".format(
# hd5path=hd5path,
# filepath=filepath,
# )
# )
# write_store.create_table_index(out_hd5path, optlevel=9, kind='full')
# #we just return the store; the table is by assumption too big.
# c_store = get_store_by_path_chunks([
# base_dir_out, map_pool_key + ".h5"], mode="a")
# return c_store
#
# def decorate(frame, index=None):
# """refresh fit table metadata with info from the index"""
# if index is None:
# index = get_index_table()
# index_cols = index.columns
# frame_cols = frame.columns
# wanted_frame_cols = frame_cols.difference(index_cols)
# frame = frame[wanted_frame_cols]
# frame = pd.merge(
# frame, index,
# left_on=["video_id"],
# right_index=True,
# copy=False,
# how="left",
# sort=False,
# )
# return frame
| mit |
wronk/mne-python | examples/inverse/plot_label_activation_from_stc.py | 62 | 1949 | """
==================================================
Extracting time course from source_estimate object
==================================================
Load a SourceEstimate object from stc files and
extract the time course of activation in
individual labels, as well as in a complex label
formed through merging two labels.
"""
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
meg_path = data_path + '/MEG/sample'
# load the stc
stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
# load the labels
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
# extract the time course for different labels from the stc
stc_lh = stc.in_label(aud_lh)
stc_rh = stc.in_label(aud_rh)
stc_bh = stc.in_label(aud_lh + aud_rh)
# calculate center of mass and transform to mni coordinates
vtx, _, t_lh = stc_lh.center_of_mass('sample')
mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
vtx, _, t_rh = stc_rh.center_of_mass('sample')
mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
# plot the activation
plt.figure()
plt.axes([.1, .275, .85, .625])
hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0]
hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0]
hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0]
plt.xlabel('Time (s)')
plt.ylabel('Source amplitude (dSPM)')
plt.xlim(stc.times[0], stc.times[-1])
# add a legend including center-of-mass mni coordinates to the plot
labels = ['LH: center of mass = %s' % mni_lh.round(2),
'RH: center of mass = %s' % mni_rh.round(2),
'Combined LH & RH']
plt.figlegend([hl, hr, hb], labels, 'lower center')
plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
plt.show()
| bsd-3-clause |
arbazkhan002/datasketch | benchmark/minhash_benchmark.py | 1 | 2087 | '''
Benchmarking the performance and accuracy of MinHash.
'''
import time, logging
from numpy import random
from hashlib import sha1
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datasketch.minhash import MinHash
logging.basicConfig(level=logging.INFO)
# Produce some bytes
int_bytes = lambda x : ("a-%d-%d" % (x, x)).encode('utf-8')
def run_perf(card, num_perm):
m = MinHash(num_perm=num_perm)
logging.info("MinHash using %d permutation functions" % num_perm)
start = time.clock()
for i in range(card):
m.update(int_bytes(i))
duration = time.clock() - start
logging.info("Digested %d hashes in %.4f sec" % (card, duration))
return duration
def _run_acc(size, seed, num_perm):
m = MinHash(num_perm=num_perm)
s = set()
random.seed(seed)
for i in range(size):
v = int_bytes(random.randint(1, size))
m.update(v)
s.add(v)
return (m, s)
def run_acc(size, num_perm):
logging.info("MinHash using %d permutation functions" % num_perm)
m1, s1 = _run_acc(size, 1, num_perm)
m2, s2 = _run_acc(size, 4, num_perm)
j = float(len(s1.intersection(s2)))/float(len(s1.union(s2)))
j_e = m1.jaccard(m2)
err = abs(j - j_e)
return err
num_perms = range(10, 256, 20)
output = "minhash_benchmark.png"
logging.info("> Running performance tests")
card = 5000
run_times = [run_perf(card, n) for n in num_perms]
logging.info("> Running accuracy tests")
size = 5000
errs = [run_acc(size, n) for n in num_perms]
logging.info("> Plotting result")
fig, axe = plt.subplots(1, 2, sharex=True, figsize=(10, 4))
ax = axe[1]
ax.plot(num_perms, run_times, marker='+')
ax.set_xlabel("Number of permutation functions")
ax.set_ylabel("Running time (sec)")
ax.set_title("MinHash performance")
ax.grid()
ax = axe[0]
ax.plot(num_perms, errs, marker='+')
ax.set_xlabel("Number of permutation functions")
ax.set_ylabel("Absolute error in Jaccard estimation")
ax.set_title("MinHash accuracy")
ax.grid()
plt.tight_layout()
fig.savefig(output)
logging.info("Plot saved to %s" % output)
| mit |
nilearn/nilearn_sandbox | nilearn_sandbox/mass_univariate/tests/test_utils.py | 1 | 6016 | import numpy as np
from sklearn.utils import check_random_state
from numpy.testing import assert_array_almost_equal, assert_raises
from nilearn.mass_univariate.utils import (
orthonormalize_matrix, normalize_matrix_on_axis,
t_score_with_covars_and_normalized_design)
def get_tvalue_with_alternative_library(tested_vars, target_vars, covars=None):
"""Utility function to compute tvalues with linalg or statsmodels
Massively univariate linear model (= each target is considered
independently).
Parameters
----------
tested_vars: array-like, shape=(n_samples, n_regressors)
Tested variates, whose associated coefficients are to be tested
independently with a t-test, resulting in as many t-values.
target_vars: array-like, shape=(n_samples, n_targets)
Target variates, to be approximated with a linear combination of
the tested variates and the confounding variates.
covars: array-like, shape=(n_samples, n_confounds)
Confounding variates, to be fitted but not to be tested
Returns
-------
t-values: np.ndarray, shape=(n_targets, n_regressors)
"""
### set up design
n_samples, n_regressors = tested_vars.shape
n_targets = target_vars.shape[1]
if covars is not None:
n_covars = covars.shape[1]
design_matrix = np.hstack((tested_vars, covars))
else:
n_covars = 0
design_matrix = tested_vars
mask_covars = np.ones(n_regressors + n_covars, dtype=bool)
mask_covars[:n_regressors] = False
test_matrix = np.array([[1.] + [0.] * n_covars])
### t-values computation
try: # try with statsmodels is available (more concise)
from statsmodels.regression.linear_model import OLS
t_values = np.empty((n_targets, n_regressors))
for i in range(n_targets):
current_target = target_vars[:, i].reshape((-1, 1))
for j in range(n_regressors):
current_tested_mask = mask_covars.copy()
current_tested_mask[j] = True
current_design_matrix = design_matrix[:, current_tested_mask]
ols_fit = OLS(current_target, current_design_matrix).fit()
t_values[i, j] = np.ravel(ols_fit.t_test(test_matrix).tvalue)
except: # use linalg if statsmodels is not available
from numpy import linalg
lost_dof = n_covars + 1 # fit all tested variates independently
t_values = np.empty((n_targets, n_regressors))
for i in range(n_regressors):
current_tested_mask = mask_covars.copy()
current_tested_mask[i] = True
current_design_matrix = design_matrix[:, current_tested_mask]
invcov = linalg.pinv(current_design_matrix)
normalized_cov = np.dot(invcov, invcov.T)
t_val_denom_aux = np.diag(
np.dot(test_matrix, np.dot(normalized_cov, test_matrix.T)))
t_val_denom_aux = t_val_denom_aux.reshape((-1, 1))
for j in range(n_targets):
current_target = target_vars[:, j].reshape((-1, 1))
res_lstsq = linalg.lstsq(current_design_matrix, current_target)
residuals = (current_target
- np.dot(current_design_matrix, res_lstsq[0]))
t_val_num = np.dot(test_matrix, res_lstsq[0])
t_val_denom = np.sqrt(
np.sum(residuals ** 2, 0) / float(n_samples - lost_dof)
* t_val_denom_aux)
t_values[j, i] = np.ravel(t_val_num / t_val_denom)
return t_values
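# Shape-only usage sketch with hypothetical random data (not part of the tests):
# rng = np.random.RandomState(0)
# tvals = get_tvalue_with_alternative_library(rng.randn(20, 1), rng.randn(20, 5))
# tvals.shape == (5, 1)  # one t-value per target for the single tested variate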
### Tests t-scores computation ################################################
def test_t_score_with_covars_and_normalized_design_nocovar(random_state=0):
rng = check_random_state(random_state)
### Normalized data
n_samples = 50
# generate data
var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples)
var2 = rng.randn(n_samples, 1)
var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize
# compute t-scores with nilearn's routine
t_val_own = t_score_with_covars_and_normalized_design(var1, var2)
# compute t-scores with linalg or statsmodels
t_val_alt = get_tvalue_with_alternative_library(var1, var2)
assert_array_almost_equal(t_val_own, t_val_alt)
def test_t_score_with_covars_and_normalized_design_withcovar(random_state=0):
"""
"""
rng = check_random_state(random_state)
### Normalized data
n_samples = 50
# generate data
var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples) # normalized
var2 = rng.randn(n_samples, 1)
var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize
covars = np.eye(n_samples, 3) # covars is orthogonal
covars[3] = -1 # covars is orthogonal to var1
covars = orthonormalize_matrix(covars)
# nilearn's t-score
own_score = t_score_with_covars_and_normalized_design(var1, var2, covars)
# compute t-scores with linalg or statmodels
ref_score = get_tvalue_with_alternative_library(var1, var2, covars)
assert_array_almost_equal(own_score, ref_score)
### Tests normalize function ##################################################
# Is actually tested with doctests, but here are some additional tests.
def test_normalize_with_null_columns():
"""Check that the normalize function fails when matrix has null columns.
"""
X = np.array([[1, 2, 0], [0, 1, 0], [1, 1, 0]])
assert_raises(ValueError, normalize_matrix_on_axis, X)
def test_normalize_bad_dimensions():
"""Check that the normalize function fails when matrix has bad dimension.
"""
X = np.arange(6)
assert_raises(ValueError, normalize_matrix_on_axis, X)
X = np.arange(12).reshape((3, 2, 2))
assert_raises(ValueError, normalize_matrix_on_axis, X)
def test_normalize_bad_axis():
"""Check that the normalize function fails when bad axis is provided.
"""
X = np.array([[1, 2], [0, 1], [1, 1]])
assert_raises(ValueError, normalize_matrix_on_axis, X, axis=3)
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/tests/test_cross_validation.py | 3 | 43729 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented at on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
@ignore_warnings
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1./n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_leave_label_out_changing_labels():
"""Check that LeaveOneLabelOut and LeavePLabelOut work normally if
the labels variable is changed before calling __iter__"""
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5, indices=True)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_masks = cval.KFold(len(y), 5, indices=False)
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
"""Function to test that the values are passed correctly to the
classifier arguments for non-array type
"""
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so the f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
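# Illustrative helper (added here for clarity, not part of the scikit-learn
# test suite): to our understanding, the permutation p-value asserted above is
# (1 + the number of permutation scores reaching the true score) divided by
# (n_permutations + 1). The helper name is ours.
def _example_permutation_pvalue(true_score, permutation_scores):
    permutation_scores = np.asarray(permutation_scores)
    # The +1 terms keep the p-value strictly positive, even when no permuted
    # labelling reaches the score obtained on the original labels.
    n_at_least = np.sum(permutation_scores >= true_score)
    return (n_at_least + 1.0) / (permutation_scores.shape[0] + 1.0)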
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=False)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=False)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=False)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=False)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=False)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=False)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
assert_equal(np.asarray(train).dtype.kind, 'b')
            assert_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=True)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=True)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=True)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=True)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=True)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=True)
# Bootstrap as a cross-validation is deprecated
b = assert_warns(DeprecationWarning, cval.Bootstrap, 2)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
@ignore_warnings
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=0.6,
test_size=0.5)
@ignore_warnings
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
@ignore_warnings
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
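def _expected_precomputed_kernel_blocks(K, tr, te):
    # Helper added for illustration (not used by the test above): with a
    # precomputed kernel, _safe_split is expected to keep training rows *and*
    # columns for the training block, and test rows against training columns
    # for the test block -- which is what the dot-product assertions verify.
    return K[np.ix_(tr, tr)], K[np.ix_(te, tr)]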
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval._check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval._check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval._check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
cv = cval._check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval._check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval._check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1/2, 3/4, 1/2, 1/3])
assert_almost_equal(score_macro, [1, 1/2, 3/4, 1/2, 1/4])
assert_almost_equal(score_samples, [1, 1/2, 3/4, 1/2, 1/4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
henry-ngo/VIP | vip_hci/negfc/simplex_optim.py | 1 | 16676 | #! /usr/bin/env python
"""
Module with simplex (Nelder-Mead) optimization for determining the flux and
position of a companion using the Negative Fake Companion technique.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from .simplex_fmerit import chisquare
from ..var import frame_center
from ..conf import time_ini, timing, sep
__all__ = ['firstguess_from_coord',
'firstguess_simplex',
'firstguess']
def firstguess_from_coord(planet, center, cube, angs, PLSC, psf,
fwhm, annulus_width, aperture_radius, ncomp,
cube_ref=None, svd_mode='lapack', scaling=None,
fmerit='sum', collapse='median', f_range=None,
display=False, verbose=True, save=False, **kwargs):
"""
Determine a first guess for the flux of a companion at a given position
in the cube by doing a simple grid search evaluating the reduced chi2.
Parameters
----------
planet: numpy.array
The (x,y) position of the planet in the pca processed cube.
center: numpy.array
The (x,y) position of the cube center.
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
PLSC: float
The platescale, in arcsec per pixel.
psf: numpy.array
The scaled psf expressed as a numpy.array.
fwhm : float
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
ncomp: int
The number of principal components.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
f_range: numpy.array, optional
The range of flux tested values. If None, 20 values between 0 and 5000
are tested.
display: boolean, optional
If True, the figure chi2 vs. flux is displayed.
verbose: boolean
If True, display intermediate info in the shell.
save: boolean, optional
If True, the figure chi2 vs. flux is saved.
kwargs: dict, optional
Additional parameters are passed to the matplotlib plot method.
Returns
-------
out : numpy.array
The radial coordinates and the flux of the companion.
"""
    xy = planet - center
    r0 = np.sqrt(xy[0]**2 + xy[1]**2)
    theta0 = np.mod(np.arctan2(xy[1], xy[0]) / np.pi * 180, 360)
if f_range is not None:
n = f_range.shape[0]
else:
n = 20
f_range = np.linspace(0,5000,n)
chi2r = []
if verbose:
print('Step | flux | chi2r')
counter = 0
for j, f_guess in enumerate(f_range):
chi2r.append(chisquare((r0,theta0,f_guess), cube, angs, PLSC, psf,
fwhm, annulus_width, aperture_radius,(r0,theta0),
ncomp, cube_ref, svd_mode, scaling, fmerit,
collapse))
if chi2r[j] > chi2r[j-1]: counter+=1
if counter == 4: break
if verbose:
print('{}/{} {:.3f} {:.3f}'.format(j+1,n,f_guess,chi2r[j]))
chi2r = np.array(chi2r)
f0 = f_range[chi2r.argmin()]
if display:
plt.figure(figsize=kwargs.pop('figsize',(8,4)))
plt.title(kwargs.pop('title',''))
plt.xlim(f_range[0], f_range[:chi2r.shape[0]].max())
plt.ylim(chi2r.min()*0.9, chi2r.max()*1.1)
plt.plot(f_range[:chi2r.shape[0]],chi2r,
linestyle = kwargs.pop('linestyle','-'),
color = kwargs.pop('color','gray'),
marker = kwargs.pop('marker','.'),
markerfacecolor='r', markeredgecolor='r', **kwargs)
plt.xlabel('flux')
plt.ylabel(r'$\chi^2_{r}$')
plt.grid('on')
if save:
plt.savefig('chi2rVSflux.pdf')
if display:
plt.show()
return (r0,theta0,f0)
def firstguess_simplex(p, cube, angs, psf, plsc, ncomp, fwhm, annulus_width,
aperture_radius, cube_ref=None, svd_mode='lapack',
scaling=None, fmerit='sum', collapse='median', p_ini=None,
options=None, verbose=False, **kwargs):
"""
Determine the position of a companion using the negative fake companion
    technique and a standard minimization algorithm (default: Nelder-Mead).
Parameters
----------
p : np.array
Estimate of the candidate position.
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psf: numpy.array
The scaled psf expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
ncomp: int
The number of principal components.
fwhm : float
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
p_ini : np.array
Position (r, theta) of the circular aperture center.
options: dict, optional
The scipy.optimize.minimize options.
verbose : boolean, optional
        If True, information is displayed in the shell.
Returns
-------
out : scipy.optimize.minimize solution object
The solution of the minimization algorithm.
"""
    if options is None:
        options = {}
    if verbose:
        print('')
        print('{} minimization is running...'.format(
            options.get('method', 'Nelder-Mead')))
if p_ini is None:
p_ini = p
solu = minimize(chisquare, p, args=(cube, angs, plsc, psf, fwhm, annulus_width,
aperture_radius, p_ini, ncomp, cube_ref,
svd_mode, scaling, fmerit, collapse),
method = options.pop('method','Nelder-Mead'),
options=options, **kwargs)
if verbose: print(solu)
return solu
def firstguess(cube, angs, psfn, ncomp, plsc, planets_xy_coord, fwhm=4,
annulus_width=3, aperture_radius=4, cube_ref=None,
svd_mode='lapack', scaling=None, fmerit='sum', collapse='median',
p_ini=None, f_range=None, simplex=True, simplex_options=None,
display=False, verbose=True, save=False, figure_options=None):
""" Determines a first guess for the position and the flux of a planet.
We process the cube without injecting any negative fake companion.
This leads to the visual detection of the planet(s). For each of them,
one can estimate the (x,y) coordinates in pixel for the position of the
star, as well as the planet(s).
From the (x,y) coordinates in pixels for the star and planet(s), we can
estimate a preliminary guess for the position and flux for each planet
    by using the method "firstguess_from_coord". The argument "f_range" allows
    one to specify prior limits for the flux (optional, default: None).
    This step can be repeated to refine the preliminary guess for the flux.
    We can go a step further by using a simplex Nelder-Mead minimization to
estimate the first guess based on the preliminary guess.
Parameters
----------
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psfn: numpy.array
The centered and normalized (flux in a 1*FWHM aperture must equal 1)
PSF 2d-array.
ncomp: int
The number of principal components.
plsc: float
The platescale, in arcsec per pixel.
    planets_xy_coord: array or list
The list of (x,y) positions of the planets.
fwhm : float, optional
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
p_ini: numpy.array
Position (r, theta) of the circular aperture center.
f_range: numpy.array, optional
The range of flux tested values. If None, 20 values between 0 and 5000
are tested.
simplex: boolean, optional
If True, the Nelder-Mead minimization is performed after the flux grid
search.
simplex_options: dict, optional
The scipy.optimize.minimize options.
display: boolean, optional
If True, the figure chi2 vs. flux is displayed.
verbose: boolean
If True, display intermediate info in the shell.
save: boolean, optional
If True, the figure chi2 vs. flux is saved.
figure_options: dict, optional
Additional parameters are passed to the matplotlib plot method.
Returns
-------
out : The radial coordinates and the flux of the companion.
WARNING: POLAR ANGLE IS NOT THE CONVENTIONAL NORTH-TO-EAST P.A.
"""
if verbose: start_time = time_ini()
if figure_options is None:
figure_options = {'color':'gray', 'marker':'.',
'title':r'$\chi^2_{r}$ vs flux'}
planets_xy_coord = np.array(planets_xy_coord)
n_planet = planets_xy_coord.shape[0]
center_xy_coord = np.array(frame_center(cube[0]))
if f_range is None:
f_range = np.linspace(0,5000,20)
if simplex_options is None:
simplex_options = {'xtol':1e-1, 'maxiter':500, 'maxfev':1000}
r_0 = np.zeros(n_planet)
theta_0 = np.zeros_like(r_0)
f_0 = np.zeros_like(r_0)
for index_planet in range(n_planet):
if verbose:
print('')
print(sep)
print(' Planet {} '.format(index_planet))
print(sep)
print('')
msg2 = 'Planet {}: flux estimation at the position [{},{}], running ...'
print(msg2.format(index_planet,planets_xy_coord[index_planet,0],
planets_xy_coord[index_planet,1]))
res_init = firstguess_from_coord(planets_xy_coord[index_planet],
center_xy_coord, cube, angs, plsc, psfn,
fwhm, annulus_width, aperture_radius,
ncomp, f_range=f_range,
cube_ref=cube_ref, svd_mode=svd_mode,
scaling=scaling, fmerit=fmerit,
collapse=collapse, display=display,
verbose=verbose, save=save,
**figure_options)
r_pre, theta_pre, f_pre = res_init
if verbose:
msg3 = 'Planet {}: preliminary guess: (r, theta, f)=({:.1f}, {:.1f}, {:.1f})'
print(msg3.format(index_planet,r_pre, theta_pre, f_pre))
if simplex:
if verbose:
msg4 = 'Planet {}: Simplex Nelder-Mead minimization, running ...'
print(msg4.format(index_planet))
res = firstguess_simplex((r_pre,theta_pre,f_pre), cube, angs, psfn,
plsc, ncomp, fwhm, annulus_width,
aperture_radius, cube_ref=cube_ref,
svd_mode=svd_mode, scaling=scaling,
fmerit=fmerit, collapse=collapse, p_ini=p_ini,
options=simplex_options, verbose=False)
r_0[index_planet], theta_0[index_planet], f_0[index_planet] = res.x
if verbose:
msg5 = 'Planet {}: Success: {}, nit: {}, nfev: {}, chi2r: {}'
print(msg5.format(index_planet,res.success,res.nit,res.nfev,
res.fun))
print('message: {}'.format(res.message))
else:
if verbose:
msg4bis = 'Planet {}: Simplex Nelder-Mead minimization skipped.'
print(msg4bis.format(index_planet))
r_0[index_planet] = r_pre
theta_0[index_planet] = theta_pre
f_0[index_planet] = f_pre
if verbose:
centy, centx = frame_center(cube[0])
posy = r_0 * np.sin(np.deg2rad(theta_0[index_planet])) + centy
posx = r_0 * np.cos(np.deg2rad(theta_0[index_planet])) + centx
msg6 = 'Planet {}: simplex result: (r, theta, f)=({:.3f}, {:.3f}'
msg6 += ', {:.3f}) at \n (X,Y)=({:.2f}, {:.2f})'
print(msg6.format(index_planet, r_0[index_planet],
                              theta_0[index_planet], f_0[index_planet],
                              posx[index_planet], posy[index_planet]))
if verbose:
print('\n', sep, '\nDONE !\n', sep)
timing(start_time)
return (r_0,theta_0,f_0)
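# ----------------------------------------------------------------------------
# Minimal usage sketch, added for illustration only; it is never called by the
# package. The FITS file names, plate scale and candidate position below are
# placeholder assumptions, and astropy (a VIP dependency) is assumed to be
# available for reading the files.
def _example_firstguess_usage():
    from astropy.io import fits
    cube = fits.getdata('adi_cube.fits')             # hypothetical ADI cube
    angs = fits.getdata('parallactic_angles.fits')   # hypothetical angles
    psfn = fits.getdata('psf_normalized.fits')       # normalized, centered PSF
    # One candidate at pixel coordinates (63, 62), 10 principal components.
    r0, theta0, f0 = firstguess(cube, angs, psfn, ncomp=10, plsc=0.0272,
                                planets_xy_coord=[(63, 62)], fwhm=4.5,
                                simplex=True, verbose=False)
    return r0, theta0, f0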
| mit |
amueller/scipy-2017-sklearn | notebooks/figures/tree_plotting.py | 2 | 20503 | import numpy as np
from numbers import Integral
from sklearn.externals import six
from sklearn.tree.export import _color_brew, _criterion, _tree
def plot_tree(decision_tree, max_depth=None, feature_names=None,
class_names=None, label='all', filled=False,
leaves_parallel=False, impurity=True, node_ids=False,
proportion=False, rotate=False, rounded=False,
special_characters=False, precision=3, ax=None, fontsize=None):
"""Plot a decision tree.
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
        The decision tree to be plotted.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, optional (default=3)
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
ax : matplotlib axis, optional (default=None)
Axes to plot to. If None, use current axis.
Examples
--------
>>> from sklearn.datasets import load_iris
    >>> from sklearn import tree
    >>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> plot_tree(clf) # doctest: +SKIP
"""
exporter = _MPLTreeExporter(
max_depth=max_depth, feature_names=feature_names,
class_names=class_names, label=label, filled=filled,
leaves_parallel=leaves_parallel, impurity=impurity, node_ids=node_ids,
proportion=proportion, rotate=rotate, rounded=rounded,
special_characters=special_characters, precision=precision,
fontsize=fontsize)
exporter.export(decision_tree, ax=ax)
class _BaseTreeExporter(object):
def get_color(self, value):
# Find the appropriate color & intensity for a node
if self.colors['bounds'] is None:
# Classification tree
color = list(self.colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = ((sorted_values[0] - sorted_values[1])
/ (1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(self.colors['rgb'][0])
alpha = ((value - self.colors['bounds'][0]) /
(self.colors['bounds'][1] - self.colors['bounds'][0]))
# unpack numpy scalars
alpha = float(alpha)
# compute the color as alpha against white
color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
# Return html color code in #RRGGBB format
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def get_fill_color(self, tree, node_id):
# Fetch appropriate color for node
if 'rgb' not in self.colors:
# Initialize colors and bounds if required
self.colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
self.colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
self.colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
return self.get_color(node_val)
def node_to_str(self, tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (self.label == 'root' and node_id == 0) or self.label == 'all'
characters = self.characters
node_string = characters[-1]
# Write node ID
if self.node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if self.feature_names is not None:
feature = self.feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
self.precision),
characters[4])
# Write impurity
if self.impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], self.precision))
+ characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if self.proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if self.proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, self.precision)
elif self.proportion:
# Classification
value_text = np.around(value, self.precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, self.precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (self.class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if self.class_names is not True:
class_name = self.class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string.endswith(characters[4]):
node_string = node_string[:-len(characters[4])]
return node_string + characters[5]
class _MPLTreeExporter(_BaseTreeExporter):
def __init__(self, max_depth=None, feature_names=None,
class_names=None, label='all', filled=False,
leaves_parallel=False, impurity=True, node_ids=False,
proportion=False, rotate=False, rounded=False,
special_characters=False, precision=3, fontsize=None):
self.max_depth = max_depth
self.feature_names = feature_names
self.class_names = class_names
self.label = label
self.filled = filled
self.leaves_parallel = leaves_parallel
self.impurity = impurity
self.node_ids = node_ids
self.proportion = proportion
self.rotate = rotate
self.rounded = rounded
self.special_characters = special_characters
self.precision = precision
self.fontsize = fontsize
self._scaley = 10
# validate
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# The depth of each node for plotting with 'leaf' option
self.ranks = {'leaves': []}
# The colors to render each node with
self.colors = {'bounds': None}
self.characters = ['#', '[', ']', '<=', '\n', '', '']
self.bbox_args = dict(fc='w')
if self.rounded:
self.bbox_args['boxstyle'] = "round"
self.arrow_args = dict(arrowstyle="<-")
def _make_tree(self, node_id, et):
# traverses _tree.Tree recursively, builds intermediate
# "_reingold_tilford.Tree" object
name = self.node_to_str(et, node_id, criterion='entropy')
if (et.children_left[node_id] != et.children_right[node_id]):
children = [self._make_tree(et.children_left[node_id], et),
self._make_tree(et.children_right[node_id], et)]
else:
return Tree(name, node_id)
return Tree(name, node_id, *children)
def export(self, decision_tree, ax=None):
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
if ax is None:
ax = plt.gca()
ax.set_axis_off()
my_tree = self._make_tree(0, decision_tree.tree_)
dt = buchheim(my_tree)
self._scalex = 1
self.recurse(dt, decision_tree.tree_, ax)
anns = [ann for ann in ax.get_children()
if isinstance(ann, Annotation)]
# get all the annotated points
xys = [ann.xyann for ann in anns]
mins = np.min(xys, axis=0)
maxs = np.max(xys, axis=0)
ax.set_xlim(mins[0], maxs[0])
ax.set_ylim(maxs[1], mins[1])
if self.fontsize is None:
# get figure to data transform
inv = ax.transData.inverted()
renderer = ax.figure.canvas.get_renderer()
# update sizes of all bboxes
for ann in anns:
ann.update_bbox_position_size(renderer)
# get max box width
widths = [inv.get_matrix()[0, 0]
* ann.get_bbox_patch().get_window_extent().width
for ann in anns]
# get minimum max size to not be too big.
max_width = max(max(widths), 1)
# adjust fontsize to avoid overlap
# width should be around 1 in data coordinates
size = anns[0].get_fontsize() / max_width
for ann in anns:
ann.set_fontsize(size)
def recurse(self, node, tree, ax, depth=0):
kwargs = dict(bbox=self.bbox_args, ha='center', va='center',
zorder=100 - 10 * depth)
if self.fontsize is not None:
kwargs['fontsize'] = self.fontsize
xy = (node.x * self._scalex, node.y * self._scaley)
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
kwargs['bbox']['fc'] = self.get_fill_color(tree,
node.tree.node_id)
if node.parent is None:
# root
ax.annotate(node.tree.node, xy, **kwargs)
else:
xy_parent = (node.parent.x * self._scalex,
node.parent.y * self._scaley)
kwargs["arrowprops"] = self.arrow_args
ax.annotate(node.tree.node, xy_parent, xy, **kwargs)
for child in node.children:
self.recurse(child, tree, ax, depth=depth + 1)
else:
xy_parent = (node.parent.x * self._scalex, node.parent.y *
self._scaley)
kwargs["arrowprops"] = self.arrow_args
kwargs['bbox']['fc'] = 'grey'
ax.annotate("\n (...) \n", xy_parent, xy, **kwargs)
class DrawTree(object):
def __init__(self, tree, parent=None, depth=0, number=1):
self.x = -1.
self.y = depth
self.tree = tree
self.children = [DrawTree(c, self, depth + 1, i + 1)
for i, c
in enumerate(tree.children)]
self.parent = parent
self.thread = None
self.mod = 0
self.ancestor = self
self.change = self.shift = 0
self._lmost_sibling = None
# this is the number of the node in its group of siblings 1..n
self.number = number
def left(self):
return self.thread or len(self.children) and self.children[0]
def right(self):
return self.thread or len(self.children) and self.children[-1]
def lbrother(self):
n = None
if self.parent:
for node in self.parent.children:
if node == self:
return n
else:
n = node
return n
def get_lmost_sibling(self):
if not self._lmost_sibling and self.parent and self != \
self.parent.children[0]:
self._lmost_sibling = self.parent.children[0]
return self._lmost_sibling
lmost_sibling = property(get_lmost_sibling)
def __str__(self):
return "%s: x=%s mod=%s" % (self.tree, self.x, self.mod)
def __repr__(self):
return self.__str__()
def buchheim(tree):
dt = firstwalk(DrawTree(tree))
min = second_walk(dt)
if min < 0:
third_walk(dt, -min)
return dt
def third_walk(tree, n):
tree.x += n
for c in tree.children:
third_walk(c, n)
def firstwalk(v, distance=1.):
if len(v.children) == 0:
if v.lmost_sibling:
v.x = v.lbrother().x + distance
else:
v.x = 0.
else:
default_ancestor = v.children[0]
for w in v.children:
firstwalk(w)
default_ancestor = apportion(w, default_ancestor, distance)
# print("finished v =", v.tree, "children")
execute_shifts(v)
midpoint = (v.children[0].x + v.children[-1].x) / 2
w = v.lbrother()
if w:
v.x = w.x + distance
v.mod = v.x - midpoint
else:
v.x = midpoint
return v
def apportion(v, default_ancestor, distance):
w = v.lbrother()
if w is not None:
# in buchheim notation:
# i == inner; o == outer; r == right; l == left; r = +; l = -
vir = vor = v
vil = w
vol = v.lmost_sibling
sir = sor = v.mod
sil = vil.mod
sol = vol.mod
while vil.right() and vir.left():
vil = vil.right()
vir = vir.left()
vol = vol.left()
vor = vor.right()
vor.ancestor = v
shift = (vil.x + sil) - (vir.x + sir) + distance
if shift > 0:
move_subtree(ancestor(vil, v, default_ancestor), v, shift)
sir = sir + shift
sor = sor + shift
sil += vil.mod
sir += vir.mod
sol += vol.mod
sor += vor.mod
if vil.right() and not vor.right():
vor.thread = vil.right()
vor.mod += sil - sor
else:
if vir.left() and not vol.left():
vol.thread = vir.left()
vol.mod += sir - sol
default_ancestor = v
return default_ancestor
def move_subtree(wl, wr, shift):
subtrees = wr.number - wl.number
# print(wl.tree, "is conflicted with", wr.tree, 'moving', subtrees,
# 'shift', shift)
# print wl, wr, wr.number, wl.number, shift, subtrees, shift/subtrees
wr.change -= shift / subtrees
wr.shift += shift
wl.change += shift / subtrees
wr.x += shift
wr.mod += shift
def execute_shifts(v):
shift = change = 0
for w in v.children[::-1]:
# print("shift:", w, shift, w.change)
w.x += shift
w.mod += shift
change += w.change
shift += w.shift + change
def ancestor(vil, v, default_ancestor):
# the relevant text is at the bottom of page 7 of
# "Improving Walker's Algorithm to Run in Linear Time" by Buchheim et al,
# (2002)
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.8757&rep=rep1&type=pdf
if vil.ancestor in v.parent.children:
return vil.ancestor
else:
return default_ancestor
def second_walk(v, m=0, depth=0, min=None):
v.x += m
v.y = depth
if min is None or v.x < min:
min = v.x
for w in v.children:
min = second_walk(w, m + v.mod, depth + 1, min)
return min
class Tree(object):
def __init__(self, node="", node_id=-1, *children):
self.node = node
self.width = len(node)
self.node_id = node_id
if children:
self.children = children
else:
self.children = []
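# Small self-contained sketch (added for illustration; not used by plot_tree):
# build a toy ``Tree`` and run the Buchheim/Walker layout above to obtain an
# x position and a depth for every node. The helper name is ours.
def _demo_buchheim_layout():
    leaves = [Tree("a", 1), Tree("b", 2)]
    inner = Tree("inner", 3, *leaves)
    root = Tree("root", 0, inner, Tree("c", 4))
    layout = buchheim(root)

    def collect(node, acc):
        # Each DrawTree node exposes .x (horizontal position) and .y (depth).
        acc.append((node.tree.node, node.x, node.y))
        for child in node.children:
            collect(child, acc)
        return acc

    return collect(layout, [])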
| cc0-1.0 |
mjudsp/Tsallis | examples/ensemble/plot_random_forest_regression_multioutput.py | 28 | 2642 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result, the predictions are biased towards the centre of the circle.
Using a single underlying feature, the model learns both the
x and y coordinates as output.
"""
print(__doc__)
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
| bsd-3-clause |
renesugar/arrow | python/pyarrow/serialization.py | 1 | 12689 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import six
import sys
import numpy as np
import pyarrow as pa
from pyarrow.compat import builtin_pickle, descr_to_dtype
from pyarrow.lib import SerializationContext, py_buffer
try:
import cloudpickle
except ImportError:
cloudpickle = builtin_pickle
# ----------------------------------------------------------------------
# Set up serialization for numpy with dtype object (primitive types are
# handled efficiently with Arrow's Tensor facilities, see
# python_to_arrow.cc)
def _serialize_numpy_array_list(obj):
if obj.dtype.str != '|O':
        # Make the array c_contiguous if necessary so that we can change
        # the view.
if not obj.flags.c_contiguous:
obj = np.ascontiguousarray(obj)
return obj.view('uint8'), np.lib.format.dtype_to_descr(obj.dtype)
else:
return obj.tolist(), np.lib.format.dtype_to_descr(obj.dtype)
def _deserialize_numpy_array_list(data):
if data[1] != '|O':
assert data[0].dtype == np.uint8
return data[0].view(descr_to_dtype(data[1]))
else:
return np.array(data[0], dtype=np.dtype(data[1]))
def _serialize_numpy_matrix(obj):
if obj.dtype.str != '|O':
        # Make the array c_contiguous if necessary so that we can change
        # the view.
if not obj.flags.c_contiguous:
obj = np.ascontiguousarray(obj.A)
return obj.A.view('uint8'), np.lib.format.dtype_to_descr(obj.dtype)
else:
return obj.A.tolist(), np.lib.format.dtype_to_descr(obj.dtype)
def _deserialize_numpy_matrix(data):
if data[1] != '|O':
assert data[0].dtype == np.uint8
return np.matrix(data[0].view(descr_to_dtype(data[1])),
copy=False)
else:
return np.matrix(data[0], dtype=np.dtype(data[1]), copy=False)
# ----------------------------------------------------------------------
# pyarrow.RecordBatch-specific serialization matters
def _serialize_pyarrow_recordbatch(batch):
output_stream = pa.BufferOutputStream()
with pa.RecordBatchStreamWriter(output_stream, schema=batch.schema) as wr:
wr.write_batch(batch)
return output_stream.getvalue() # This will also close the stream.
def _deserialize_pyarrow_recordbatch(buf):
with pa.RecordBatchStreamReader(buf) as reader:
return reader.read_next_batch()
# ----------------------------------------------------------------------
# pyarrow.Array-specific serialization matters
def _serialize_pyarrow_array(array):
    # TODO(suquark): implement more efficient array serialization.
batch = pa.RecordBatch.from_arrays([array], [''])
return _serialize_pyarrow_recordbatch(batch)
def _deserialize_pyarrow_array(buf):
    # TODO(suquark): implement more efficient array deserialization.
batch = _deserialize_pyarrow_recordbatch(buf)
return batch.columns[0]
# ----------------------------------------------------------------------
# pyarrow.Table-specific serialization matters
def _serialize_pyarrow_table(table):
output_stream = pa.BufferOutputStream()
with pa.RecordBatchStreamWriter(output_stream, schema=table.schema) as wr:
wr.write_table(table)
return output_stream.getvalue() # This will also close the stream.
def _deserialize_pyarrow_table(buf):
with pa.RecordBatchStreamReader(buf) as reader:
return reader.read_all()
def _pickle_to_buffer(x):
pickled = builtin_pickle.dumps(x, protocol=builtin_pickle.HIGHEST_PROTOCOL)
return py_buffer(pickled)
def _load_pickle_from_buffer(data):
as_memoryview = memoryview(data)
if six.PY2:
return builtin_pickle.loads(as_memoryview.tobytes())
else:
return builtin_pickle.loads(as_memoryview)
# ----------------------------------------------------------------------
# pandas-specific serialization matters
def _register_custom_pandas_handlers(context):
# ARROW-1784, faster path for pandas-only visibility
try:
import pandas as pd
except ImportError:
return
import pyarrow.pandas_compat as pdcompat
sparse_type_error_msg = (
'{0} serialization is not supported.\n'
'Note that {0} is planned to be deprecated '
'in pandas future releases.\n'
'See https://github.com/pandas-dev/pandas/issues/19239 '
'for more information.'
)
def _serialize_pandas_dataframe(obj):
if (pdcompat._pandas_api.has_sparse
and isinstance(obj, pd.SparseDataFrame)):
raise NotImplementedError(
sparse_type_error_msg.format('SparseDataFrame')
)
return pdcompat.dataframe_to_serialized_dict(obj)
def _deserialize_pandas_dataframe(data):
return pdcompat.serialized_dict_to_dataframe(data)
def _serialize_pandas_series(obj):
if (pdcompat._pandas_api.has_sparse
and isinstance(obj, pd.SparseSeries)):
raise NotImplementedError(
sparse_type_error_msg.format('SparseSeries')
)
return _serialize_pandas_dataframe(pd.DataFrame({obj.name: obj}))
def _deserialize_pandas_series(data):
deserialized = _deserialize_pandas_dataframe(data)
return deserialized[deserialized.columns[0]]
context.register_type(
pd.Series, 'pd.Series',
custom_serializer=_serialize_pandas_series,
custom_deserializer=_deserialize_pandas_series)
context.register_type(
pd.Index, 'pd.Index',
custom_serializer=_pickle_to_buffer,
custom_deserializer=_load_pickle_from_buffer)
if hasattr(pd.core, 'arrays'):
if hasattr(pd.core.arrays, 'interval'):
context.register_type(
pd.core.arrays.interval.IntervalArray,
'pd.core.arrays.interval.IntervalArray',
custom_serializer=_pickle_to_buffer,
custom_deserializer=_load_pickle_from_buffer)
if hasattr(pd.core.arrays, 'period'):
context.register_type(
pd.core.arrays.period.PeriodArray,
'pd.core.arrays.period.PeriodArray',
custom_serializer=_pickle_to_buffer,
custom_deserializer=_load_pickle_from_buffer)
if hasattr(pd.core.arrays, 'datetimes'):
context.register_type(
pd.core.arrays.datetimes.DatetimeArray,
'pd.core.arrays.datetimes.DatetimeArray',
custom_serializer=_pickle_to_buffer,
custom_deserializer=_load_pickle_from_buffer)
context.register_type(
pd.DataFrame, 'pd.DataFrame',
custom_serializer=_serialize_pandas_dataframe,
custom_deserializer=_deserialize_pandas_dataframe)
def register_torch_serialization_handlers(serialization_context):
# ----------------------------------------------------------------------
# Set up serialization for pytorch tensors
try:
import torch
def _serialize_torch_tensor(obj):
if obj.is_sparse:
# TODO(pcm): Once ARROW-4453 is resolved, return sparse
# tensor representation here
return (obj._indices().detach().numpy(),
obj._values().detach().numpy(), list(obj.shape))
else:
return obj.detach().numpy()
def _deserialize_torch_tensor(data):
if isinstance(data, tuple):
return torch.sparse_coo_tensor(data[0], data[1], data[2])
else:
return torch.from_numpy(data)
for t in [torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor,
torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.Tensor]:
serialization_context.register_type(
t, "torch." + t.__name__,
custom_serializer=_serialize_torch_tensor,
custom_deserializer=_deserialize_torch_tensor)
except ImportError:
# no torch
pass
def _register_collections_serialization_handlers(serialization_context):
def _serialize_deque(obj):
return list(obj)
def _deserialize_deque(data):
return collections.deque(data)
serialization_context.register_type(
collections.deque, "collections.deque",
custom_serializer=_serialize_deque,
custom_deserializer=_deserialize_deque)
def _serialize_ordered_dict(obj):
return list(obj.keys()), list(obj.values())
def _deserialize_ordered_dict(data):
return collections.OrderedDict(zip(data[0], data[1]))
serialization_context.register_type(
collections.OrderedDict, "collections.OrderedDict",
custom_serializer=_serialize_ordered_dict,
custom_deserializer=_deserialize_ordered_dict)
def _serialize_default_dict(obj):
return list(obj.keys()), list(obj.values()), obj.default_factory
def _deserialize_default_dict(data):
return collections.defaultdict(data[2], zip(data[0], data[1]))
serialization_context.register_type(
collections.defaultdict, "collections.defaultdict",
custom_serializer=_serialize_default_dict,
custom_deserializer=_deserialize_default_dict)
def _serialize_counter(obj):
return list(obj.keys()), list(obj.values())
def _deserialize_counter(data):
return collections.Counter(dict(zip(data[0], data[1])))
serialization_context.register_type(
collections.Counter, "collections.Counter",
custom_serializer=_serialize_counter,
custom_deserializer=_deserialize_counter)
def register_default_serialization_handlers(serialization_context):
# ----------------------------------------------------------------------
# Set up serialization for primitive datatypes
# TODO(pcm): This is currently a workaround until arrow supports
# arbitrary precision integers. This is only called on long integers,
# see the associated case in the append method in python_to_arrow.cc
serialization_context.register_type(
int, "int",
custom_serializer=lambda obj: str(obj),
custom_deserializer=lambda data: int(data))
if (sys.version_info < (3, 0)):
serialization_context.register_type(
long, "long", # noqa: F821
custom_serializer=lambda obj: str(obj),
custom_deserializer=lambda data: long(data)) # noqa: F821
serialization_context.register_type(
type(lambda: 0), "function",
pickle=True)
serialization_context.register_type(type, "type", pickle=True)
serialization_context.register_type(
np.matrix, 'np.matrix',
custom_serializer=_serialize_numpy_matrix,
custom_deserializer=_deserialize_numpy_matrix)
serialization_context.register_type(
np.ndarray, 'np.array',
custom_serializer=_serialize_numpy_array_list,
custom_deserializer=_deserialize_numpy_array_list)
serialization_context.register_type(
pa.Array, 'pyarrow.Array',
custom_serializer=_serialize_pyarrow_array,
custom_deserializer=_deserialize_pyarrow_array)
serialization_context.register_type(
pa.RecordBatch, 'pyarrow.RecordBatch',
custom_serializer=_serialize_pyarrow_recordbatch,
custom_deserializer=_deserialize_pyarrow_recordbatch)
serialization_context.register_type(
pa.Table, 'pyarrow.Table',
custom_serializer=_serialize_pyarrow_table,
custom_deserializer=_deserialize_pyarrow_table)
_register_collections_serialization_handlers(serialization_context)
_register_custom_pandas_handlers(serialization_context)
def default_serialization_context():
context = SerializationContext()
register_default_serialization_handlers(context)
return context
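def _example_roundtrip():
    # Illustrative sketch (not part of the public module API): serialize a
    # nested Python object with the default context and read it back. The
    # ``pa.serialize`` / ``pa.deserialize`` entry points are assumed to be
    # available in this version of pyarrow.
    context = default_serialization_context()
    data = {'weights': np.arange(10, dtype=np.float64),
            'meta': collections.OrderedDict([('name', 'example')])}
    buf = pa.serialize(data, context=context).to_buffer()
    return pa.deserialize(buf, context=context)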
| apache-2.0 |
justincassidy/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
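# Editor's addition (hedged): a tiny standalone sketch of what 'soft' voting
# with weights does -- it averages the per-class probabilities using the given
# weights.  The probability vectors below are made up for illustration only.
def _soft_vote_sketch():
    p1 = np.array([0.8, 0.2])   # classifier 1, weight 1
    p2 = np.array([0.4, 0.6])   # classifier 2, weight 2
    averaged = (1 * p1 + 2 * p2) / (1 + 2)
    assert_almost_equal(averaged.sum(), 1.0)
    return averaged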
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_equal(clf1.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
    assert_equal(clf2.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
    assert_equal(clf3.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_equal(eclf.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_equal(eclf.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
X0Leon/XQuant | xquant/utils/alternative.py | 1 | 1984 | # -*- coding: utf-8 -*-
"""
Monte Carlo simulation of alternative historical backtests
@author: Leon Zhang
@version: 0.4
"""
import numpy as np
def reorder(a, chunks=1):
"""
    Reorder and recombine blocks of the sequence (shuffling without replacement)
    Parameters:
        a: 1-D sequence, e.g. daily returns (return/PnL)
        chunks: number of blocks, e.g. the number of weeks or months
    Returns:
        the reordered sequence
"""
a = np.array_split(a, chunks)
np.random.shuffle(a)
a = np.concatenate(a)
return a
def resample(a, chunks=1):
"""
    Random sampling with replacement; if len(a) is not divisible by chunks, it falls back to 1
    Parameters:
        a: 1-D sequence, e.g. daily returns
        chunks: number of blocks; len(a) must be divisible by it
    Returns:
        the newly generated random sequence
"""
try:
a = np.split(a, chunks)
index = np.random.choice(range(chunks), size=chunks)
a = np.concatenate([a[i] for i in index])
    except ValueError:  # len(a) not divisible by chunks: fall back to element-wise sampling
a = np.random.choice(a, size=len(a))
return a
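# Editor's addition (hedged sketch): when len(a) is divisible by `chunks` the
# block-bootstrap branch above is used; otherwise the except-branch falls back
# to plain element-wise resampling with replacement.
def _resample_example():
    a = np.arange(10)
    blocked = resample(a, chunks=5)    # 5 blocks of 2, sampled with replacement
    fallback = resample(a, chunks=3)   # 10 % 3 != 0 -> element-wise fallback
    return blocked, fallback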
def monte_carlo(a, chunks=1, times=20, shuffle_type='reorder'):
"""
    Monte Carlo simulation that resamples and recombines the sequence a
    Parameters:
        a: 1-D sequence
        chunks: number of blocks; for shuffle_type='resample', len(a) should be divisible by chunks
        times: number of simulations
        shuffle_type: 'reorder' or 'resample' for sampling without / with replacement
    Returns:
        list of all generated random sequences
"""
if shuffle_type == 'reorder':
shuffle = reorder
elif shuffle_type == 'resample':
shuffle = resample
else:
return None
list_a = []
for _ in range(times):
list_a.append(shuffle(a, chunks=chunks))
return list_a
if __name__ == '__main__':
    # Example
import matplotlib.pyplot as plt
ret = (np.random.randn(100) + 0.1) / 100
equity_curve = (1 + ret).cumprod()
plt.plot(equity_curve, color='blue')
generated_ret = monte_carlo(ret, chunks=20)
for ret in generated_ret:
curve = (1 + ret).cumprod()
plt.plot(curve, color='grey', alpha=0.5) | mit |
jarogames/gregory | gregory/pi/prerequisites.py | 1 | 2700 | #!/usr/bin/python3
import subprocess as sp
import os # devnul
DEBUG=True
packages={}
#### WHAT I CONSIDER CRUCIAL FOR RPI ##################
packages['blessings']='from blessings import Terminal ... color terminal, moving cursor'
packages['prettytable']='ASCII pretty tables'
packages['matplotlib']='import matplotlib.pyplot as plt'
packages['numpy']='import numpy as np'
packages['pandas']='import pandas as pd'
packages['pyserial']='? serial read, arduino...?'
packages['pyzmq']='! also aptitude install libzmq5 libczmq3'
packages['serf_master']='serf package python module'
packages['terminaltables']='tables in terminal'
packages['colorclass']='colors for terminal tables'
#packages['']='serf package python module'
# packages['xvbfwrapper']=''
# packages['zenipy']=''
# packages['youtube-dl']=''
# packages['mps-youtube']=''
# packages['staticmap']=''
# packages['scipy']=''
# packages['iminuit']=''
# packages['h5py?']=''
# packages['Flask']=''
# packages['imutils']='opencv image transforms'
# packages['logzero']=''
# packages['lxml']=''
# packages['pexpect']='commandline child communication (ftp...)'
# packages['Pillow']='python image lib, fork'
# packages['svgwrite']='SVG drawing module'
# packages['pyswarm']='particle swarm optimization w.constraints/pyswarms'
# packages['']=''
# packages['']=''
# packages['']=''
def check_prerequisites():
if DEBUG:print("F--- Prerequisites: -------------------")
CMD="pip3 list --format=legacy" # pip 9.0.1 starts to complain
CMD="pip3 list" # pip 1.5 doesnt know --format
all=sp.check_output( CMD.split() ).decode("utf8").rstrip().split("\n")
#print(all)
all=[ x.split()[0] for x in all ]
#print(all)
needed=[]
for p in packages.keys():
if DEBUG:print("ck ... {:15s} ...".format(p),end="")
if p in all:
if DEBUG:print("[OK]")
else:
if DEBUG:print("[!!]")
needed.append(p)
################# FANTSTIC ERROR CODE CHECK ######
# FNULL=open( os.devnull,"w")
# try:
# res=sp.check_call( CMD.split() , stdout=FNULL )
# except sp.CalledProcessError:
# print("PAC...",p,"...",packages[p])
# needed.append( p )
# #if DEBUG:print(":... {}".format(p) )
return needed
def install_prerequisites( prers ):
errors=[]
for p in prers:
CMD="pip3 install "+p+" --user"
if DEBUG:print("D...",CMD)
try:
res=sp.check_call( CMD.split() )
except sp.CalledProcessError:
print("ERR...",p,"...",packages[p])
errors.append( p )
if DEBUG:print("ERRORS:",errors)
return errors
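# Editor's addition (hedged usage sketch): the intended call sequence, kept as
# an uncalled helper so that importing this module stays side-effect free.
def _example_bootstrap():
    missing = check_prerequisites()
    if missing:
        return install_prerequisites(missing)
    return []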
| gpl-2.0 |
JohanComparat/pySU | spm/bin_SMF/mass_density.py | 1 | 14462 | import astropy.cosmology as co
cosmo=co.Planck15
import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['agg.path.chunksize'] = 2000000
matplotlib.rcParams.update({'font.size': 12})
import matplotlib.pyplot as p
import numpy as n
import os
import sys
from scipy.integrate import quad
from scipy.interpolate import interp1d
# stat functions
ld = lambda selection : len(selection.nonzero()[0])
# global cosmo quantities
#run mass_density.py 0.2 0.5
#run mass_density.py 0.5 0.8
#run mass_density.py 0.8 1.1
#run mass_density.py 1.1 1.5
z_min = float(sys.argv[1])
z_max = float(sys.argv[2])
logm_m13_05, smf_m13_05 = n.loadtxt(os.path.join(os.environ['OBS_REPO'], 'spm/literature', "M13_cmass_045_055.txt"), unpack=True)
logm_m13_065, smf_m13_65 = n.loadtxt(os.path.join(os.environ['OBS_REPO'], 'spm/literature', "M13_cmass_06_07.txt") , unpack=True)
if z_min < 0.4:
path_2_comp = os.path.join(os.environ['GIT_PYSU'], 'data','zmin0.20_completness.txt')
elif z_min < 0.65:
path_2_comp = os.path.join(os.environ['GIT_PYSU'], 'data','zmin0.50_completness.txt')
else :
path_2_comp = os.path.join(os.environ['GIT_PYSU'], 'data','zmin0.80_completness.txt')
# log10(M/Msun), Ratios for r<[17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0]
DATA_GALFORM = n.loadtxt(path_2_comp, unpack=True)
gf = {}
gf['r17']= interp1d(DATA_GALFORM[0], DATA_GALFORM[1])
gf['r18']= interp1d(DATA_GALFORM[0], DATA_GALFORM[2])
gf['r19']= interp1d(DATA_GALFORM[0], DATA_GALFORM[3])
gf['r20']= interp1d(DATA_GALFORM[0], DATA_GALFORM[4])
gf['r21']= interp1d(DATA_GALFORM[0], DATA_GALFORM[5])
gf['r22']= interp1d(DATA_GALFORM[0], DATA_GALFORM[6])
gf['r23']= interp1d(DATA_GALFORM[0], DATA_GALFORM[7])
gf['r24']= interp1d(DATA_GALFORM[0], DATA_GALFORM[8])
gf['r25']= interp1d(DATA_GALFORM[0], DATA_GALFORM[9])
volume_per_deg2 = ( cosmo.comoving_volume(z_max) - cosmo.comoving_volume(z_min) ) * n.pi / 129600.
volume_per_deg2_val = volume_per_deg2.value
smf_ilbert13 = lambda M, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s : ( phi_1s * (M/M_star) ** alpha_1s + phi_2s * (M/M_star) ** alpha_2s ) * n.e ** (-M/M_star) * (M/ M_star)
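# Editor's note (hedged): the lambda above is the double-Schechter stellar mass
# function of Ilbert et al. (2013) expressed per logarithmic mass bin,
#   Phi(M) = exp(-M/M*) * (M/M*) * [phi1*(M/M*)^alpha1 + phi2*(M/M*)^alpha2],
# with (M*, phi1, alpha1, phi2, alpha2) read from the parameter file loaded below.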
path_ilbert13_SMF = os.path.join(os.environ['OBS_REPO'], 'spm/literature', "ilbert_2013_mass_function_params.txt")
zmin, zmax, N, M_comp, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s, log_rho_s = n.loadtxt(path_ilbert13_SMF, unpack=True)
if z_min == 0.2 :
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0] )
area_deep2 = 0.5
if z_min == 0.5 :
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[1], phi_1s[1]*10**(-3), alpha_1s[1], phi_2s[1]*10**(-3), alpha_2s[1] )
area_deep2 = 2.0
if z_min == 0.8 :
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[2], phi_1s[2]*10**(-3), alpha_1s[2], phi_2s[2]*10**(-3), alpha_2s[2] )
area_deep2 = 2.0
if z_min == 1.1 :
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[3], phi_1s[3]*10**(-3), alpha_1s[3], phi_2s[3]*10**(-3), alpha_2s[3] )
area_deep2 = 2.0
#print 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0]
fun = lambda mass :smf01(mass) # mass *
masses = 10**n.arange(7,12,0.1)
total_mass_per_unit_volume = n.array([quad(fun, mmin, 10**13)[0] for mmin in masses])
area_sdss = 7900.
area_boss = 10000.
area_cosmos = 1.52
volume_sdss = 7900. * volume_per_deg2_val
volume_boss = 10000. * volume_per_deg2_val
volume_cosmos = 1.52 * volume_per_deg2_val
volume_deep2 = 2.78 * volume_per_deg2_val
out_dir = os.path.join('/data42s/comparat/firefly/v1_1_0/figures/mass-functions')
imfs = ["Chabrier_ELODIE_", "Chabrier_MILES_", "Chabrier_STELIB_", "Kroupa_ELODIE_", "Kroupa_MILES_", "Kroupa_STELIB_", "Salpeter_ELODIE_", "Salpeter_MILES_", "Salpeter_STELIB_" ]
cosmos_dir = os.path.join(os.environ['OBS_REPO'], 'COSMOS', 'catalogs' )
path_2_cosmos_cat = os.path.join( cosmos_dir, "photoz-2.0", "photoz_vers2.0_010312.fits")
#path_2_cosmos_cat = os.path.join( cosmos_dir, "COSMOS2015_Laigle+_v1.1.fits.gz")
sdss_dir = os.path.join(os.environ['OBS_REPO'], 'SDSS')
path_2_sdss_cat = os.path.join( sdss_dir, '26', 'catalogs', "FireFly.fits" )
path_2_eboss_cat = os.path.join( sdss_dir, 'v5_10_0', 'catalogs', "FireFly.fits" )
# DEEP SURVEYS
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.v2.fits" )
# OPENS THE CATALOGS
deep2 = fits.open(path_2_deep2_cat)[1].data
sdss = fits.open(path_2_sdss_cat)[1].data
boss = fits.open(path_2_eboss_cat)[1].data
cosmos = fits.open(path_2_cosmos_cat)[1].data
def get_basic_stat_DR14(catalog, z_name, z_err_name, class_name, zwarning, zflg_val, prefix, err_max):
catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val)
catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_sel = (catalog_stat) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < err_max )
m_catalog = n.log10(catalog[prefix+'stellar_mass'])
w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
return catalog_sel, m_catalog, w_catalog
def get_basic_stat_DEEP2(deep2, IMF, err_max):
z_flg = 'ZQUALITY'
z_name = 'ZBEST'
stellar_mass = IMF+'stellar_mass'
deep2_zOk = (deep2[z_name] > z_min) & (deep2[z_name] < z_max) & (deep2[z_flg]>=2.) & (deep2['SSR']>0) & (deep2['TSR']>0) & (deep2['SSR']<=1.0001) & (deep2['TSR']<=1.0001)
ok_deep2_02 = (deep2_zOk) & (deep2[stellar_mass] < 10**14. ) & (deep2[stellar_mass] > 0. ) & ( - n.log10(deep2[stellar_mass+'_low']) + n.log10(deep2[stellar_mass+'_up']) < err_max)
return ok_deep2_02, n.log10(deep2[stellar_mass][ok_deep2_02]), 1./(deep2['SSR'][ok_deep2_02]*deep2['TSR'][ok_deep2_02])
def get_hist(masses, weights, mbins):
NN = n.histogram(masses, mbins)[0]
NW = n.histogram(masses, mbins, weights = weights)[0]
xx = (mbins[1:] + mbins[:-1])/2.
return xx, NW, NN**(-0.5)*NW
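# Editor's addition (hedged sketch): get_hist returns the bin centres, the
# weighted counts and a Poisson-style error estimate (N**-0.5 times the
# weighted count); empty bins therefore produce NaN errors, as in the calls
# further down.
def _example_get_hist():
    demo_masses = n.array([9.0, 9.1, 10.2, 10.3, 11.5])
    demo_weights = n.ones_like(demo_masses)
    return get_hist(demo_masses, demo_weights, n.arange(8.7, 12.5, 0.25))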
dlog10m = 0.25
mbins = n.arange(8.7,12.5,dlog10m)
def plot_smf_b(IMF="Chabrier_ELODIE_", err_max=0.4):
boss_sel, boss_m, boss_w = get_basic_stat_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', 0., IMF, err_max)
x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
#sel = (y>0)&(ye>0)&(y>2*ye)
return x, y, ye #x[sel], y[sel], ye[sel]
#p.errorbar(x, y, yerr = ye, label=IMF[:-1], lw=1)
def plot_smf_s(IMF="Chabrier_ELODIE_", err_max=0.4):
boss_sel, boss_m, boss_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 0., IMF, err_max)
x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_sdss*volume_per_deg2_val), mbins = mbins)
sel = (y>0)&(ye>0)&(y>2*ye)
return x, y, ye#x[sel], y[sel], ye[sel]
#p.errorbar(x, y, yerr = ye, label=IMF[:-1], lw=1)
def plot_smf_d(IMF="Chabrier_ELODIE_", err_max=0.4, area_deep2=0.5):
boss_sel, boss_m, boss_w = get_basic_stat_DEEP2(deep2, IMF, err_max)
x, y, ye = get_hist(boss_m, weights = boss_w/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
sel = (y>0)&(ye>0)&(y>2*ye)
return x, y, ye#x[sel], y[sel], ye[sel]
xa, ya, yea = plot_smf_b("Chabrier_ELODIE_", 0.2*2)
xb, yb, yeb = plot_smf_b("Chabrier_MILES_", 0.2*2)
xc, yc, yec = plot_smf_b("Chabrier_STELIB_", 0.2*2)
#xc, yc, yec = xc[1:-1], yc[1:-1], yec[1:-1]
xd, yd, yed = plot_smf_s("Chabrier_ELODIE_", 0.2*2)
xe, ye, yee = plot_smf_s("Chabrier_MILES_", 0.2*2)
xf, yf, yef = plot_smf_s("Chabrier_STELIB_", 0.2*2)
#xf, yf, yef = xf[2:], yf[2:], yef[2:]
xg, yg, yeg = plot_smf_d("Chabrier_ELODIE_", 0.2*2, area_deep2=area_deep2)
xh, yh, yeh = plot_smf_d("Chabrier_MILES_", 0.2*2, area_deep2=area_deep2)
#xh, yh, yeh = xh[:-1], yh[:-1], yeh[:-1]
xi, yi, yei = plot_smf_d("Chabrier_STELIB_", 0.2*2, area_deep2=area_deep2)
#xi, yi, yei = xi[:-1], yi[:-1], yei[:-1]
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
p.fill_between( mbins, y1=smf01(10**mbins)*0.77, y2=smf01(10**mbins)*1.23, color='g', alpha=0.5)
p.plot(mbins, smf01(10**mbins), label='Ilbert 13', color='g')
#p.plot(mbins, smf01(10**mbins)*gf['r18'](mbins), label='G17 r<18', ls='dashed', lw=2)
#p.plot(mbins, smf01(10**mbins)*gf['r19'](mbins), label='G17 r<19', ls='dashed', lw=2)
#p.plot(mbins, smf01(10**mbins)*gf['r20'](mbins), label='G17 r<20', ls='dashed', lw=2)
p.plot(logm_m13_05[:-3], 10**smf_m13_05[:-3], color='m', label='M13 z=0.5')
#p.plot(logm_m13_065, 10**smf_m13_65, label='M13 z=0.65')
print(ya, yb, yc)
print(yea, yeb, yec)
p.fill_between( xa, y1=n.min([ya-yea, yb-yeb, yc-yec], axis=0), y2=n.max([ya+yea, yb+yeb, yc+yec], axis=0), color='r', alpha=0.5)
p.plot(xa, n.mean([ya, yb, yc], axis=0), label=r'BOSS, eBOSS', color='r')
p.fill_between( xd, y1=n.min([yd-yed, ye-yee, yf-yef], axis=0), y2=n.max([yd+yed, ye+yee, yf+yef], axis=0), color='b', alpha=0.5)
p.plot(xd, n.mean([yd, ye, yf], axis=0), label=r'SDSS', color='b')
p.fill_between( xg, y1=n.min([yg-yeg, yh-yeh, yi-yei], axis=0), y2=n.max([yg+yeg, yh+yeh, yi+yei], axis=0), color='k', alpha=0.5)
p.plot(xg, n.mean([yg, yh, yi], axis=0), label=r'DEEP2', color='k')
p.title('Chabrier IMF '+str(z_min)+'<z<'+str(z_max))
p.xlabel(r"$\log_{10}$ (M / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=6, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.2))
p.grid()
p.savefig(os.path.join(out_dir, "firefly_SMF_BOSS_"+str(z_min)+'_z_'+str(z_max)+".png" ))
p.clf()
sys.exit()
imf = imfs[0]
stellar_mass = imf+'stellar_mass'
redshift_reliable_boss = (boss['CLASS_NOQSO'] == "GALAXY") & ( boss['Z_ERR_NOQSO'] > 0.0) & (boss['ZWARNING_NOQSO'] == 0) & (boss['Z_NOQSO']>0.001) & (boss['Z_NOQSO'] > boss['Z_ERR_NOQSO'] ) # (boss['SN_MEDIAN_ALL'] > 0.1 ) &
error_reliable_boss = (boss[stellar_mass+'_up'] > boss[stellar_mass+'_low'] ) & (boss[stellar_mass+'_up'] > 0. ) & ( boss[stellar_mass+'_low'] > 0. ) & (boss[stellar_mass+'_up'] < 1e14 ) & ( boss[stellar_mass+'_low'] < 1e14 )
mass_reliable_boss_02 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up']) - n.log10(boss[stellar_mass+'_low']))/2. < 0.2 )
mass_reliable_boss_04 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up']) - n.log10(boss[stellar_mass+'_low']))/2. < 0.4 )
ok_boss_02 = (error_reliable_boss) & (mass_reliable_boss_02) & (redshift_reliable_boss)& (boss['Z_NOQSO']>z_min)& (boss['Z_NOQSO']<z_max)
ok_boss_04 = (error_reliable_boss) & (mass_reliable_boss_04) & (redshift_reliable_boss)& (boss['Z_NOQSO']>z_min)& (boss['Z_NOQSO']<z_max)
redshift_reliable_sdss = (sdss['CLASS'] == "GALAXY") & ( sdss['Z_ERR'] > 0.0) & (sdss['ZWARNING'] == 0) & (sdss['Z'] > 0.001) & (sdss['Z'] > sdss['Z_ERR'] ) # (sdss['SN_MEDIAN_ALL'] > 0.1 ) &
error_reliable_sdss = (sdss[stellar_mass+'_up'] > sdss[stellar_mass+'_low'] ) & (sdss[stellar_mass+'_up'] > 0. ) & ( sdss[stellar_mass+'_low'] > 0. ) & (sdss[stellar_mass+'_up'] < 1e14 ) & ( sdss[stellar_mass+'_low'] < 1e14 )
mass_reliable_sdss_02 = (sdss[stellar_mass] > 1e6 ) & ( sdss[stellar_mass] < 1e14 ) & ((n.log10(sdss[stellar_mass+'_up']) - n.log10(sdss[stellar_mass+'_low']))/2. < 0.2 )
mass_reliable_sdss_04 = (sdss[stellar_mass] > 1e6 ) & ( sdss[stellar_mass] < 1e14 ) & ((n.log10(sdss[stellar_mass+'_up']) - n.log10(sdss[stellar_mass+'_low']))/2. < 0.4 )
ok_sdss_02 = (error_reliable_sdss) & (mass_reliable_sdss_02) & (redshift_reliable_sdss)& (sdss['Z']>z_min)& (sdss['Z']<z_max)
ok_sdss_04 = (error_reliable_sdss) & (mass_reliable_sdss_04) & (redshift_reliable_sdss)& (sdss['Z']>z_min)& (sdss['Z']<z_max)
z_flg = 'ZQUALITY'
z_name = 'ZBEST'
deep2_zOk = (deep2[z_name] > 0.001) & (deep2[z_flg]>=2.) & (deep2[z_name] < 1.7) & (deep2['SSR']>0) & (deep2['TSR']>0) & (deep2['SSR']<=1.0001) & (deep2['TSR']<=1.0001)
ok_deep2_02 = (deep2_zOk) & (deep2[stellar_mass] < 10**14. ) & (deep2[stellar_mass] > 0. ) & (deep2[stellar_mass] >= deep2[stellar_mass+'_low'] ) & (deep2[stellar_mass] <= deep2[stellar_mass+'_up'] ) & ( - n.log10(deep2[stellar_mass+'_low']) + n.log10(deep2[stellar_mass+'_up']) < 0.4 )
ok_deep2_04 = (deep2_zOk) & (deep2[stellar_mass] < 10**14. ) & (deep2[stellar_mass] > 0. ) & (deep2[stellar_mass] >= deep2[stellar_mass+'_low'] ) & (deep2[stellar_mass] <= deep2[stellar_mass+'_up'] ) & ( - n.log10(deep2[stellar_mass+'_low']) + n.log10(deep2[stellar_mass+'_up']) < 0.8 )
total_mass_boss_02 = n.array([ n.sum(boss[stellar_mass][ok_boss_02 & (boss[stellar_mass] > mmin)]) for mmin in masses ])
total_mass_boss_04 = n.array([ n.sum(boss[stellar_mass][ok_boss_04 & (boss[stellar_mass] > mmin)]) for mmin in masses ])
total_mass_sdss_02 = n.array([ n.sum(sdss[stellar_mass][ok_sdss_02 & (sdss[stellar_mass] > mmin)]) for mmin in masses ])
total_mass_sdss_04 = n.array([ n.sum(sdss[stellar_mass][ok_sdss_04 & (sdss[stellar_mass] > mmin)]) for mmin in masses ])
total_mass_deep2_02 = n.array([ n.sum(deep2[stellar_mass][ok_deep2_02 & (deep2[stellar_mass] > mmin)]) for mmin in masses ])
total_mass_deep2_04 = n.array([ n.sum(deep2[stellar_mass][ok_deep2_04 & (deep2[stellar_mass] > mmin)]) for mmin in masses ])
p.figure(1, (4.5, 4.5))
p.axes([0.2,0.2,0.7,0.7])
total = total_mass_per_unit_volume * volume_boss
p.plot(masses, total_mass_boss_02/total, label='BOSS 0.2' )
#p.plot(masses, total_mass_boss_04/total, label='BOSS 0.4' )
total = total_mass_per_unit_volume * volume_sdss
p.plot(masses, total_mass_sdss_02/total, label='SDSS 0.2' )
#p.plot(masses, total_mass_sdss_04/total, label='SDSS 0.4' )
total = total_mass_per_unit_volume * volume_deep2
p.plot(masses, total_mass_deep2_02/total, label='DEEP2 0.2' )
#p.plot(masses, total_mass_deep2_04/total, label='DEEP2 0.4' )
p.legend(frameon=False, loc=0)
p.xlabel('Mmin')
p.ylabel('Mass fraction (M>Mmin)')
p.title(str(z_min)+'<z<'+str(z_max))
p.xscale('log')
p.yscale('log')
p.ylim((0.001,2))
p.xlim((1e7,10**(12.5)))
p.grid()
p.savefig(os.path.join(out_dir, "mass_density"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
| cc0-1.0 |
TomAugspurger/pandas | pandas/tests/indexes/period/test_factorize.py | 8 | 1267 | import numpy as np
from pandas import PeriodIndex
import pandas._testing as tm
class TestFactorize:
def test_factorize(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M"
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M"
)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-03", "2014-02", "2014-01"], freq="M")
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
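# Editor's addition (hedged standalone sketch): factorize() returns integer
# codes plus the unique PeriodIndex; with sort=True the uniques are ordered
# ascending, so the codes index into that sorted set.
def _factorize_sketch():
    idx = PeriodIndex(["2014-02", "2014-01", "2014-02"], freq="M")
    codes, uniques = idx.factorize(sort=True)
    tm.assert_numpy_array_equal(codes, np.array([1, 0, 1], dtype=np.intp))
    return uniques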
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
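# Editor's note (hedged): each `heldout` value above is passed to
# train_test_split as the *test* fraction, so the x-axis plotted below,
# xx = 1 - heldout, is the proportion of the data used for training.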
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
glennq/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/datasets/__init__.py | 61 | 3734 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
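# Editor's addition (hedged standalone sketch): the orthogonality property
# verified by check_ortho above, demonstrated on a small fitted PLSCanonical
# model; the Gram matrix of the X scores should be (numerically) diagonal.
def _example_score_orthogonality():
    d = load_linnerud()
    plsca = pls_.PLSCanonical(n_components=2).fit(d.data, d.target)
    gram = np.dot(plsca.x_scores_.T, plsca.x_scores_)
    assert_array_almost_equal(gram, np.diag(np.diag(gram)))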
def test_PLSSVD():
# Let's check the PLSSVD doesn't return all possible component but just
# the specificied number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
architecture-building-systems/CEAforArcGIS | cea/optimization/preprocessing/processheat.py | 2 | 2143 | """
Boiler Pre-treatment for Heat Processing
At the moment, process heat is excluded from the optimization process.
It is assumed that, wherever process heat is required, the most competitive alternative is a dedicated natural gas boiler.
"""
import pandas as pd
from cea.technologies import boiler
from cea.technologies.constants import BOILER_ETA_HP
from cea.constants import HOURS_IN_YEAR, WH_TO_J
def calc_pareto_Qhp(locator, total_demand, prices, lca):
"""
This function calculates the contribution to the pareto optimal results of process heating,
:param locator: locator class
:param total_demand: dataframe with building demand
:type locator: class
:type total_demand: class
:return: hpCosts, hpCO2, hpPrim
:rtype: tuple
"""
hpCosts = 0
hpCO2 = 0
hpPrim = 0
boiler_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Boiler")
if total_demand["Qhpro_sys_MWhyr"].sum()>0:
df = total_demand[total_demand.Qhpro_sys_MWhyr != 0]
for name in df.Name :
# Extract process heat needs
Qhpro_sys_kWh = pd.read_csv(locator.get_demand_results_file(name), usecols=["Qhpro_sys_kWh"]).Qhpro_sys_kWh.values
Qnom_Wh = 0
Qannual_Wh = 0
# Operation costs / CO2 / Prim
for i in range(HOURS_IN_YEAR):
Qgas_Wh = Qhpro_sys_kWh[i] * 1E3 / BOILER_ETA_HP # [Wh] Assumed 0.9 efficiency
if Qgas_Wh < Qnom_Wh:
Qnom_Wh = Qgas_Wh
Qannual_Wh += Qgas_Wh
hpCosts += Qgas_Wh * prices.NG_PRICE # [CHF]
hpCO2 += Qgas_Wh * WH_TO_J / 1.0E6 * lca.NG_BACKUPBOILER_TO_CO2_STD / 1E3 # [ton CO2]
hpPrim += Qgas_Wh * WH_TO_J / 1.0E6 * lca.NG_BACKUPBOILER_TO_OIL_STD # [MJ-oil-eq]
# Investment costs
Capex_a_hp_USD, Opex_fixed_hp_USD, Capex_hp_USD = boiler.calc_Cinv_boiler(Qnom_Wh, 'BO1', boiler_cost_data)
hpCosts += (Capex_a_hp_USD + Opex_fixed_hp_USD)
else:
hpCosts = hpCO2 = hpPrim = 0
return hpCosts, hpCO2, hpPrim
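# Editor's addition (hedged sketch): the per-hour unit conversion used in the
# loop above, isolated for clarity.  Price and emission factors live on the
# `prices`/`lca` objects and are therefore left out of this helper.
def _example_hourly_gas_demand(q_heat_kWh, eta=BOILER_ETA_HP):
    q_gas_Wh = q_heat_kWh * 1E3 / eta        # fuel input [Wh]
    q_gas_MJ = q_gas_Wh * WH_TO_J / 1.0E6    # fuel input [MJ]
    return q_gas_Wh, q_gas_MJ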
| mit |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAData/QASeriesStruct.py | 2 | 2868 | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from copy import deepcopy
import numpy as np
import pandas as pd
class QA_DataStruct_Series():
def __init__(self, series):
self.series = series.sort_index()
if isinstance(series.index, pd.core.indexes.multi.MultiIndex):
            self.if_multiindex = True
self.index = series.index.remove_unused_levels()
else:
            self.if_multiindex = False
self.index = series.index
def __repr__(self):
return '< QA_DATASTRUCT_SEIRES >'
def __call__(self):
return self.series
@property
def code(self):
if self.if_multiindex:
return self.index.levels[1].tolist()
else:
return None
@property
def datetime(self):
if self.if_multiindex:
return self.index.levels[0].tolist()
        elif isinstance(self.index, pd.core.indexes.datetimes.DatetimeIndex):
return self.index
else:
return None
@property
def date(self):
if self.if_multiindex:
return np.unique(self.index.levels[0].date).tolist()
        elif isinstance(self.index, pd.core.indexes.datetimes.DatetimeIndex):
return np.unique(self.index.date).tolist()
else:
return None
def new(self, series):
temp = deepcopy(self)
temp.__init__(series)
return temp
def select_code(self, code):
return self.new(self.series.loc[(slice(None), code)])
def select_time(self, start, end=None):
if end is None:
return self.new(self.series.loc[(pd.Timestamp(start), slice(None))])
else:
return self.new(self.series.loc[(slice(pd.Timestamp(start), pd.Timestamp(end)), slice(None))])
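# Editor's addition (hedged usage sketch): build a (datetime, code) MultiIndex
# series and slice it with the helpers above.  The index layout is inferred
# from select_code/select_time and is an assumption of this sketch.
def _example_usage():
    idx = pd.MultiIndex.from_product(
        [pd.date_range('2021-01-01', periods=3), ['000001', '600000']],
        names=['datetime', 'code'])
    series = pd.Series(np.arange(6.0), index=idx)
    struct = QA_DataStruct_Series(series)
    by_code = struct.select_code('000001')
    by_time = struct.select_time('2021-01-01', '2021-01-02')
    return by_code, by_time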
| mit |
MartinsMednis/dynamic-modelling-tools | modeler.py | 1 | 3993 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("/home/me/mypy")
import os.path
import numpy as np
# from copy import deepcopy
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
from modelling_utilities import *
# from ethanol_models import zafarLP
from model_configuration import model_options
# Process command-line arguments and determine experiment numbers
if "--model" in sys.argv:
model_name = sys.argv[sys.argv.index('--model')+1] # one argument passed after --parameters
else:
raise Exception('Model not specified.')
if "--exptitle" in sys.argv:
exptitle = sys.argv[sys.argv.index('--exptitle')+1] # one argument passed after --exptitle
if "--path" in sys.argv:
path = sys.argv[sys.argv.index('--path')+1] # one argument passed after --path
else:
path="./"
if "--paramrow" in sys.argv:
paramrow = sys.argv[sys.argv.index('--paramrow')+1] # one argument passed after --parameters
paramrow = int(paramrow)
else:
paramrow = -1
if "--experiments" in sys.argv:
expnoList = sys.argv[sys.argv.index('--experiments')+1:] # everything passed after --experiments
# Should we estimate new parameters or simply run simulation
estimate_parameters = (True if "-estimate" in sys.argv else False)
# Should we save simulation results in parameter table
savesim = (True if "-savesim" in sys.argv else False)
## load measurements data
ethanol_all = pd.read_pickle('ethanol_all2.pickle')
all_data = load_experimental_data(ethanol_all, expnoList)
# Configure the model
path,model,bounds = model_options(model_name,exptitle)
# path = "rezultati_zafarLP/"
# model = zafarLP("zafarLP", exptitle=exptitle, path=path, save=True)
# # miu_max, Yxs, mS, Ks, Kis, Kip, a, b
# bounds = [(0.01, 0.75), (0, 0.9), (0, 0.5), (0.1, 50), (0, 600), (0, 30), (2, 6), (0, 1)]
if estimate_parameters:
print("New parameter estimation for {0} ...".format(exptitle))
savesim = True
ret = model.estimate(bounds,tuple(all_data))
parameters = ret.x
model.set_parameters(parameters)
MSE = ret.fun
# R2 = model.R2(parameters, tuple(all_data))
# visi_parametri = model.parameter_table2(parameters=parameters, exptitle=exptitle, rmse=rmse, R2=R2, save=savesim)
else:
print("New simulation of {0} ...".format(exptitle))
parameters = model.load_parameters('parameters', row=paramrow)
MSE = model.multi_fit_standalone(parameters, tuple(all_data))
# R2 = model.R2(parameters, tuple(all_data))
# print("R2 = {0}".format(R2))
# visi_parametri = model.parameter_table2(parameters=parameters, exptitle=exptitle, rmse=rmse, R2=R2, save=savesim)
# print(visi_parametri.transpose())
mod_res_table = prepare_modeling_results(model,all_data,expnoList,path,exptitle)
# print(mod_res_table)
# Here we calculate separated correlation of biomass curve, etc.
XR2 = mySafeR2(mod_res_table['XnOBS'].values,mod_res_table['XnPRED'].values)
SR2 = mySafeR2(mod_res_table['SnOBS'].values,mod_res_table['SnPRED'].values)
PR2 = mySafeR2(mod_res_table['PnOBS'].values,mod_res_table['PnPRED'].values)
Xrmse = math.sqrt(mySafeMSE(mod_res_table['XnOBS'],mod_res_table['XnPRED']))
Srmse = math.sqrt(mySafeMSE(mod_res_table['SnOBS'],mod_res_table['SnPRED']))
Prmse = math.sqrt(mySafeMSE(mod_res_table['PnOBS'],mod_res_table['PnPRED']))
RMSE = math.sqrt(MSE)
R2 = model.R2(parameters, tuple(all_data))
visi_parametri = model.parameter_table2(parameters=parameters, exptitle=exptitle, rmse=RMSE, R2=R2, XR2=XR2, SR2=SR2, PR2=PR2, Xrmse=Xrmse, Srmse=Srmse, Prmse=Prmse, save=savesim)
visi_parametri.to_html("{0}parameters_{1}.html".format(path,exptitle))
plot_all_results(model,all_data,expnoList,path)
# plot_all_results_print(model,all_data,expnoList,path)
generate_simulation_results(model,all_data,expnoList,path,exptitle)
# plt.show()
# print("-------------------------")
process_desc = ("Parameter estimation" if estimate_parameters else "Simulation")
print("{0} of {1} finished".format(process_desc,exptitle)) | unlicense |
HealthCatalystSLC/healthcareai-py | healthcareai/trained_models/trained_supervised_model.py | 2 | 30435 | """A Trained Supervised Model."""
import time
from datetime import datetime
import numpy as np
import pandas as pd
import healthcareai.common.database_writers
import healthcareai.common.file_io_utilities as hcai_io
import healthcareai.common.helpers as hcai_helpers
import healthcareai.common.model_eval as hcai_model_evaluation
import healthcareai.common.top_factors as hcai_factors
import healthcareai.common.database_connections as hcai_db
import healthcareai.common.database_validators as hcai_dbval
from healthcareai.common.healthcareai_error import HealthcareAIError
class TrainedSupervisedModel(object):
"""
The meta-object that is created when training supervised models.
This object contains
- trained estimator
- trained linear estimator used for row level factor analysis
- column metadata including transformed feature columns, grain & predicted column
- the fit data preparation pipeline used for transforming new data for prediction
- calculated metrics
- test set actuals, predicted values/probabilities, predicted classes
"""
def __init__(self,
model,
feature_model,
fit_pipeline,
model_type,
column_names,
grain_column,
prediction_column,
test_set_predictions,
test_set_class_labels,
test_set_actual,
metric_by_name,
original_column_names=None,
categorical_column_info=None,
training_time=None):
"""
Create an instance of a TrainedSupervisedModel.
Args:
model (sklearn.base.BaseEstimator): The fit scikit learn algorithm for prediction
feature_model (sklearn.base.BaseEstimator): The fit scikit learn algorithm for feature importance
fit_pipeline (sklearn.pipeline.Pipeline): A fit pipeline for use on cleaning new raw data
model_type (str): 'classification' or 'regression'
column_names (list): List of column names used as features
grain_column (str): Grain column (not used as a feature).
prediction_column (str): The name of the prediction column
test_set_predictions (list): y_prediction number (either probability of class or value)
test_set_class_labels (list): y_prediction class label if classification
test_set_actual (list): y_test
metric_by_name (dict): Metrics by name
original_column_names (list): List of column names used as features before running the data preparation
pipeline (e.g. before dummification)
categorical_column_info (dict): A dictionary mapping the name of each (pre-dummified) categorical column
to a pandas.Series containing whose index consists of the different levels of the category and whose
values consist of the frequencies with which these levels occur in the training data
training_time (float): The time in seconds it took to train the model
"""
self.model = model
self.feature_model = feature_model
self.fit_pipeline = fit_pipeline
self.column_names = column_names
self._model_type = model_type
self.grain_column = grain_column
self.prediction_column = prediction_column
self.test_set_predictions = test_set_predictions
self.test_set_class_labels = test_set_class_labels
self.test_set_actual = test_set_actual
self._metric_by_name = metric_by_name
self.original_column_names = original_column_names
self.categorical_column_info = categorical_column_info
self.train_time = training_time
@property
def algorithm_name(self):
"""Model name extracted from the class type."""
model = hcai_helpers.extract_estimator_from_meta_estimator(self.model)
name = type(model).__name__
return name
@property
def is_classification(self):
"""
Return True if trainer is set up for classification.
Easy check to consolidate magic strings in all the model type switches.
"""
return self.model_type == 'classification'
@property
def is_regression(self):
"""
Return True if trainer is set up for regression.
Easy check to consolidate magic strings in all the model type switches.
"""
return self.model_type == 'regression'
@property
def best_hyperparameters(self):
"""Best hyperparameters found if model is a meta estimator."""
return hcai_helpers.get_hyperparameters_from_meta_estimator(self.model)
@property
def model_type(self):
"""Model type: 'regression' or 'classification'."""
return self._model_type
@property
def binary_classification_scores(self):
# TODO low priority, but test this
"""Return the probability scores of the first class for a binary classification model."""
if self.is_regression:
raise HealthcareAIError('ROC/PR plots are not used to evaluate regression models.')
predictions = np.squeeze(self.test_set_predictions[:, 1])
return predictions
@property
def metrics(self):
"""Return the metrics that were calculated when the model was trained."""
return self._metric_by_name
def save(self, filename=None, debug=True):
"""
Save this object to a pickle file with the given file name.
Args:
filename (str): Optional filename override. Defaults to `timestamp_<MODEL_TYPE>_<ALGORITHM_NAME>.pkl`. For
example: `2017-05-27T09-12-30_regression_LinearRegression.pkl`
debug (bool): Print debug output to console by default
"""
if filename is None:
time_string = time.strftime("%Y-%m-%dT%H-%M-%S")
filename = '{}_{}_{}.pkl'.format(time_string, self.model_type, self.algorithm_name)
hcai_io.save_object_as_pickle(self, filename)
if debug:
print('Trained {} model saved as {}'.format(self.algorithm_name, filename))
def make_predictions(self, dataframe):
"""
Given a new dataframe, apply data transformations and return a dataframe of predictions.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
Returns:
pandas.core.frame.DataFrame: A dataframe containing the grain id and predicted values
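        Example (illustrative sketch; ``trained_model`` and ``raw_df`` are assumptions):
            predictions = trained_model.make_predictions(raw_df)
            print(predictions.head())  # grain id column plus a 'Prediction' column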
"""
# Run the raw dataframe through the preparation process
prepared_dataframe = self.prepare_and_subset(dataframe)
        # make predictions, returning the probability of a class or the value of a regression
if self.is_classification:
# Only save the prediction of one of the two classes
y_predictions = self.model.predict_proba(prepared_dataframe)[:, 1]
elif self.is_regression:
y_predictions = self.model.predict(prepared_dataframe)
else:
            raise HealthcareAIError('Model type appears to be neither regression nor classification.')
# Create a new dataframe with the grain column from the original dataframe
results = pd.DataFrame()
# Column vector must exist in order to add it to results.
if self.grain_column is not None:
results[self.grain_column] = dataframe[self.grain_column].values
results['Prediction'] = y_predictions
return results
def prepare_and_subset(self, dataframe):
"""
Prepare and subset the raw data using the pipeline saved during training.
Run the raw dataframe through the saved pipeline and return a dataframe that contains only the columns that were
in the original model.
This prevents any unexpected changes to incoming columns from interfering with the predictions.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
Returns:
pandas.core.frame.DataFrame: A dataframe that has been run through the pipeline and subsetted to only the
columns the model expects.
"""
# We want to be able to make predictions on new data (without labels) so don't want to insist that the
# prediction column be present in the new data. To get around this, add the prediction columns filled with
# NaNs. This column should be dropped when the dataframe is run through the pipeline.
if self.prediction_column not in dataframe.columns.values \
and self.prediction_column in self.original_column_names:
dataframe[self.prediction_column] = np.NaN
try:
# Raise an error here if any of the columns the model expects are not in the prediction dataframe
df2 = dataframe.copy()
if self.original_column_names is not None:
df2 = df2[self.original_column_names]
# Change the dtype of the categorical columns in the prediction dataframe to 'category' with levels
# determined by the training data before running the data preparation pipeline
if self.categorical_column_info is not None:
for column in self.categorical_column_info:
col_categories = self.categorical_column_info[column].index
df2[column] = df2[column].astype('category', categories=col_categories)
# Check whether the prediction data contains categories not present in the training set and print
# a message warning that these new values will be dropped and imputed
new_values = {v for v in dataframe[column].unique() if not (v in col_categories or pd.isnull(v))}
if len(new_values) > 0:
category_message = """Column {} contains levels not seen in the training set. These levels have
been removed and will be imputed or the corresponding rows dropped.\nNew levels: {}"""
print(category_message.format(column, new_values))
# Run the saved data preparation pipeline
prepared_dataframe = self.fit_pipeline.transform(df2)
# Subset the dataframe to only columns that were saved from the original model training
prepared_dataframe = prepared_dataframe[self.column_names]
except KeyError as ke:
required_columns = self.column_names
found_columns = list(dataframe.columns)
# If a pre-dummified dataset is expected as the input, list the pre-dummified columns instead of the dummies
            if self.original_column_names is not None:
required_columns = self.original_column_names
error_message = """One or more of the columns that the saved trained model needs is not in the dataframe.\n
Please compare these lists to see which field(s) is/are missing. Note that you can pass in extra fields,\n
which will be ignored, but you must pass in all the required fields.\n
Required fields: {}
Given fields: {}
Likely missing field(s): {}
""".format(required_columns, found_columns, ke)
raise HealthcareAIError(error_message)
return prepared_dataframe
def make_factors(self, dataframe, number_top_features=3):
"""
Given a prediction dataframe, build and return a list of the top k features in dataframe format.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
number_top_features (int): Number of top features per row
Returns:
pandas.core.frame.DataFrame: A dataframe containing the grain id and factors
"""
# Run the raw dataframe through the preparation process
prepared_dataframe = self.prepare_and_subset(dataframe)
# Create a new dataframe. If grain column exists, add the grain
# column from the original dataframe; otherwise,
# just create a new empty dataframe.
if self.grain_column is not None:
results = dataframe[[self.grain_column]]
else:
results = pd.DataFrame()
# Create a list of column names
reason_col_names = ['Factor{}TXT'.format(i) for i in range(1, number_top_features + 1)]
# Get a 2 dimensional list of all the factors
top_features = hcai_factors.top_k_features(prepared_dataframe, self.feature_model, k=number_top_features)
# Verify that the number of factors matches the number of rows in the original dataframe.
if len(top_features) != len(dataframe):
            raise HealthcareAIError('Warning! The number of factors does not match the number of rows.')
# Create a dataframe from the column names and top features
reasons_df = pd.DataFrame(top_features, columns=reason_col_names, index=dataframe.index)
# Join the top features and results dataframes
results = pd.concat([results, reasons_df], axis=1, join_axes=[dataframe.index])
# results.set_index(keys=self.grain_column, inplace=True)
return results
def make_predictions_with_k_factors(self, dataframe, number_top_features=3):
"""
        Create a dataframe with predictions and factors.
Given a prediction dataframe, build and return a dataframe with the grain column, the predictions and the top k
features.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
number_top_features (int): Number of top features per row
Returns:
pandas.core.frame.DataFrame: Predictions with factors and grain column
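        Example (illustrative sketch; names are assumptions):
            results = trained_model.make_predictions_with_k_factors(raw_df, number_top_features=3)
            # results holds the grain column, Factor1TXT..Factor3TXT and a 'Prediction' column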
"""
        # TODO Note this is inefficient since we are running the raw dataframe through the pipeline twice. Consider refactoring to run it only once.
# Get the factors and predictions
results = self.make_factors(dataframe, number_top_features=number_top_features)
predictions = self.make_predictions(dataframe)
# Verify that the number of predictions matches the number of rows in the original dataframe.
if len(predictions) != len(dataframe):
raise HealthcareAIError('Warning! The number of predictions does not match the number of rows.')
# Add predictions column to dataframe
results['Prediction'] = predictions['Prediction'].values
return results
def make_original_with_predictions_and_factors(self, dataframe, number_top_features=3):
"""
Create a dataframe containing the original data, predictions and factors.
Given a prediction dataframe, build and return a dataframe with the all the original columns, the predictions,
and the top k features.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
number_top_features (int): Number of top features per row
Returns:
pandas.core.frame.DataFrame:
"""
# TODO Note this is inefficient since we are running the raw dataframe through the pipeline twice.
# Get the factors and predictions
results = self.make_predictions_with_k_factors(dataframe, number_top_features=number_top_features)
# replace the original prediction column
original_dataframe = dataframe.drop([self.prediction_column], axis=1)
# Join the two dataframes together
results = pd.concat([original_dataframe, results], axis=1)
return results
def create_catalyst_dataframe(self, dataframe):
"""
Create a Health Catalyst specific dataframe of predictions.
Given a prediction dataframe, build and return a dataframe with the health catalyst specific column names, the
predictions, and the top 3 features.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
Returns:
pandas.core.frame.DataFrame:
"""
# Get predictions and on the top 3 features (catalyst SAMs expect 3 factors)
factors_and_predictions_df = self.make_predictions_with_k_factors(dataframe, number_top_features=3)
        # Add all the catalyst-specific columns back into the SAM
factors_and_predictions_df['BindingID'] = 0
factors_and_predictions_df['BindingNM'] = 'Python'
factors_and_predictions_df['LastLoadDTS'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return factors_and_predictions_df
def predict_to_catalyst_sam(self, dataframe, server, database, table, schema=None, predicted_column_name=None):
"""
Given a dataframe you want predictions on, make predictions and save them to a catalyst-specific EDW table.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
server (str): the target server name
database (str): the database name
table (str): the destination table name
schema (str): the optional schema
predicted_column_name (str): optional predicted column name (defaults to PredictedProbNBR or
PredictedValueNBR)
"""
# Make predictions in specific format
sam_df = self.create_catalyst_dataframe(dataframe)
# Rename prediction column to default based on model type or given one
if predicted_column_name is None:
if self.is_classification:
predicted_column_name = 'PredictedProbNBR'
elif self.is_regression:
predicted_column_name = 'PredictedValueNBR'
sam_df.rename(columns={'Prediction': predicted_column_name}, inplace=True)
try:
engine = hcai_db.build_mssql_engine_using_trusted_connections(server, database)
healthcareai.common.database_writers.write_to_db_agnostic(engine, table, sam_df, schema=schema)
except HealthcareAIError as hcaie:
# Run validation and alert user
hcai_dbval.validate_catalyst_prediction_sam_connection(server, table, self.grain_column, self.prediction_column)
raise HealthcareAIError(hcaie.message)
def predict_to_sqlite(self,
prediction_dataframe,
database,
table,
prediction_generator,
predicted_column_name=None):
"""
Given a dataframe you want predictions on, make predictions and save them to an sqlite table.
Args:
prediction_dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
database (str): database file name
table (str): table name
prediction_generator (method): one of the trained supervised model prediction methods
predicted_column_name (str): optional predicted column name (defaults to PredictedProbNBR or
PredictedValueNBR)
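        Example (illustrative sketch; database and table names are assumptions):
            trained_model.predict_to_sqlite(
                raw_df,
                'predictions.db',
                'predictions',
                trained_model.make_predictions_with_k_factors)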
"""
# validate inputs
if type(prediction_generator).__name__ != 'method':
raise HealthcareAIError(
'Use of this method requires a prediction generator from a trained supervised model')
# Get predictions from given generator
sam_df = prediction_generator(prediction_dataframe)
# Rename prediction column to default based on model type or given one
if predicted_column_name is None:
if self.is_classification:
predicted_column_name = 'PredictedProbNBR'
elif self.is_regression:
predicted_column_name = 'PredictedValueNBR'
sam_df.rename(columns={'Prediction': predicted_column_name}, inplace=True)
engine = hcai_db.build_sqlite_engine(database)
healthcareai.common.database_writers.write_to_db_agnostic(engine, table, sam_df)
def roc_plot(self):
"""Return a plot of the ROC curve of the holdout set from model training."""
self.validate_classification()
tsm_classification_comparison_plots(trained_supervised_models=self, plot_type='ROC')
def roc(self, print_output=True):
"""
Print out ROC details and return them with cutoffs.
Note this is a simple subset of TrainedSupervisedModel.metrics()
Args:
print_output (bool): True (default) to print a table of output.
Returns:
dict: A subset of TrainedSupervisedModel.metrics() that are ROC specific
"""
self.validate_classification()
metrics = self._metric_by_name
roc = {
'roc_auc': metrics['roc_auc'],
'best_roc_cutoff': metrics['best_roc_cutoff'],
'best_true_positive_rate': metrics['best_true_positive_rate'],
'best_false_positive_rate': metrics['best_false_positive_rate'],
'roc_thresholds': metrics['roc_thresholds'],
'true_positive_rates': metrics['true_positive_rates'],
'false_positive_rates': metrics['false_positive_rates'],
}
# roc = self._metric_by_name
if print_output:
print(('\nReceiver Operating Characteristic (ROC):\n'
' Area under curve (ROC AUC): {:0.2f}\n'
' Ideal ROC cutoff is {:0.2f}, yielding TPR of {:0.2f} and FPR of {:0.2f}').format(
roc['roc_auc'],
roc['best_roc_cutoff'],
roc['best_true_positive_rate'],
roc['best_false_positive_rate']))
print('|--------------------------------|')
print('| ROC |')
print('| Threshhold | TPR | FPR |')
print('|--------------|--------|--------|')
for i, _ in enumerate(roc['roc_thresholds']):
marker = '***' if roc['roc_thresholds'][i] == roc['best_roc_cutoff'] else ' '
print('| {} {:03.2f} | {:03.2f} | {:03.2f} |'.format(
marker,
roc['roc_thresholds'][i],
roc['true_positive_rates'][i],
roc['false_positive_rates'][i]))
print('|--------------------------------|')
print('| *** Ideal cutoff |')
print('|--------------------------------|')
return roc
def pr_plot(self):
"""Return a plot of the PR curve of the holdout set from model training."""
self.validate_classification()
tsm_classification_comparison_plots(trained_supervised_models=self, plot_type='PR')
def pr(self, print_output=True):
"""
Print out PR details and return them with cutoffs.
Note this is a simple subset of TrainedSupervisedModel.metrics()
Args:
print_output (bool): True (default) to print a table of output.
Returns:
dict: A subset of TrainedSupervisedModel.metrics() that are PR specific
"""
self.validate_classification()
metrics = self._metric_by_name
pr = {
'pr_auc': metrics['pr_auc'],
'best_pr_cutoff': metrics['best_pr_cutoff'],
'best_precision': metrics['best_precision'],
'best_recall': metrics['best_recall'],
'pr_thresholds': metrics['pr_thresholds'],
'precisions': metrics['precisions'],
'recalls': metrics['recalls'],
}
if print_output:
print(('\nPrecision-Recall:\n'
' Area under Precision Recall curve (PR AUC): {:0.2f}\n'
' Ideal PR cutoff is {:0.2f}, yielding precision of {:04.3f} and recall of {:04.3f}').format(
pr['pr_auc'],
pr['best_pr_cutoff'],
pr['best_precision'],
pr['best_recall']))
print('|---------------------------------|')
print('| Precision-Recall Thresholds |')
print('| Threshhold | Precision | Recall |')
print('|------------|-----------|--------|')
for i, _ in enumerate(pr['pr_thresholds']):
marker = '***' if pr['pr_thresholds'][i] == pr['best_pr_cutoff'] else ' '
print('| {} {:03.2f} | {:03.2f} | {:03.2f} |'.format(
marker,
pr['pr_thresholds'][i],
pr['precisions'][i],
pr['recalls'][i]))
print('|---------------------------------|')
print('| *** Ideal cutoff |')
print('|---------------------------------|')
return pr
def validate_classification(self):
"""Validate that a model is classification and raise an error if it is not.
Run this on any method that only makes sense for classification.
"""
# TODO add binary check and rename to validate_binary_classification
if self.model_type != 'classification':
raise HealthcareAIError('This function only runs on a binary classification model.')
def print_training_results(self):
"""
Print metrics, stats and hyperparameters of a trained supervised model.
This includes the model name, training time, hyperparameters, and performance metrics.
"""
print('{} Training Results:'.format(self.algorithm_name))
print('- Training time:')
print(' Trained the {} model in {} seconds'.format(self.algorithm_name,
round(self.train_time, 2)))
hyperparameters = self.best_hyperparameters
if hyperparameters is None:
hyperparameters = 'N/A: No hyperparameter search was performed'
print('- Best hyperparameters found were:\n {}'.format(hyperparameters))
if self._model_type == 'classification':
print('- {} performance metrics:\n Accuracy: {:03.2f}\n ROC AUC: {:03.2f}\n PR AUC: {:03.2f}'.format(
self.algorithm_name,
self.metrics['accuracy'],
self.metrics['roc_auc'],
self.metrics['pr_auc']))
elif self._model_type == 'regression':
print('- {} performance metrics:\n Mean Squared Error (MSE): {}\n Mean Absolute Error (MAE): {}'.format(
self.algorithm_name,
self.metrics['mean_squared_error'],
self.metrics['mean_absolute_error']))
def get_estimator_from_trained_supervised_model(trained_supervised_model):
"""
Given an instance of a TrainedSupervisedModel, return the main estimator, regardless of random search.
Args:
trained_supervised_model (TrainedSupervisedModel):
Returns:
sklearn.base.BaseEstimator: The scikit learn estimator
"""
# Validate input is a TSM
if not isinstance(trained_supervised_model, TrainedSupervisedModel):
raise HealthcareAIError('This requires an instance of a TrainedSupervisedModel')
"""
1. check if it is a TSM
Y: proceed
N: raise error?
2. check if tsm.model is a meta estimator
Y: extract best_estimator_
N: return tsm.model
"""
# Check if tsm.model is a meta estimator
result = hcai_helpers.extract_estimator_from_meta_estimator(trained_supervised_model.model)
return result
def tsm_classification_comparison_plots(trained_supervised_models, plot_type='ROC', save=False):
"""
Given a single or list of trained supervised models, plot a ROC or PR curve for each one.
Args:
plot_type (str): 'ROC' (default) or 'PR'
trained_supervised_models (TrainedSupervisedModel): a single or iterable containing TrainedSupervisedModels
save (bool): Save the plot to a file
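    Example (illustrative sketch; the trained models are assumptions):
        tsm_classification_comparison_plots([trained_knn, trained_random_forest], plot_type='ROC')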
"""
# Input validation and dispatch
if plot_type == 'ROC':
plotter = hcai_model_evaluation.roc_plot_from_thresholds
elif plot_type == 'PR':
plotter = hcai_model_evaluation.pr_plot_from_thresholds
else:
raise HealthcareAIError('Please choose either plot_type=\'ROC\' or plot_type=\'PR\'')
metrics_by_model = {}
try:
for index, model in enumerate(trained_supervised_models):
if not isinstance(model, TrainedSupervisedModel):
raise HealthcareAIError('One of the objects in the list is not a TrainedSupervisedModel ({})'
.format(model))
algorithm_name = "{}: {}".format(index + 1, model.algorithm_name)
metrics_by_model[algorithm_name] = model.metrics
except TypeError:
# input is not iterable (assume single TSM)
if not isinstance(trained_supervised_models, TrainedSupervisedModel):
raise HealthcareAIError('Input is not a TrainedSupervisedModel ({})'.format(trained_supervised_models))
metrics_by_model[trained_supervised_models.algorithm_name] = trained_supervised_models.metrics
# TODO so, you could check for different GUIDs that could be saved in each TSM!
# The assumption here is that each TSM was trained on the same train test split,
# which happens when instantiating SupervisedModelTrainer
# Plot with the selected plotter
plotter(metrics_by_model, save=save, debug=False)
def plot_rf_features_from_tsm(trained_supervised_model, x_train, feature_limit=15, save=False):
"""
    Given an instance of a TrainedSupervisedModel and the x_train data, display or save a feature importance graph.
Args:
trained_supervised_model (TrainedSupervisedModel):
x_train (numpy.array): A 2D numpy array that was used for training
feature_limit (int): The maximum number of features to plot
save (bool): True to save the plot, false to display it in a blocking thread
"""
model = get_estimator_from_trained_supervised_model(trained_supervised_model)
column_names = trained_supervised_model.column_names
hcai_model_evaluation.plot_random_forest_feature_importance(
model,
x_train,
column_names,
feature_limit=feature_limit,
save=save)
| mit |
GGiecold/pyRMT | setup.py | 1 | 1778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from codecs import open
from os import path
from sys import version
from setuptools import setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding = 'utf-8') as f:
long_description = f.read()
setup(name = 'pyRMT',
version = '0.1.0',
description = 'Python for Random Matrix Theory: cleaning schemes for noisy correlation matrices',
long_description = long_description,
url = 'https://github.com/GGiecold/pyRMT',
download_url = 'https://github.com/GGiecold/pyRMT',
author = 'Gregory Giecold',
author_email = 'g.giecold@gmail.com',
maintainer = 'Gregory Giecold',
maintainer_email = 'g.giecold@gmail.com',
license = 'MIT License',
py_modules = ['pyRMT'],
platforms = 'ALL',
install_requires = ['numpy', 'pandas', 'sklearn'],
setup_requires = ['numpy'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics', ],
keywords = 'applied-mathematics cleaning correlation-matrices noise-reduction random-matrix-theory'
)
| mit |
Barmaley-exe/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
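Minimal usage sketch (illustrative; ``X_train``, ``y_train`` and ``X_test`` are assumed arrays):
    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
    predictions = clf.predict(X_test)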
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric or numarray based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
   (it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
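Illustrative selection sketch (script name is an assumption):
    python myscript.py --numpy        # pick numpy on the command line
    # or set the rc value before importing matplotlib.numerix:
    from matplotlib import rcParams
    rcParams['numerix'] = 'numpy'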
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| agpl-3.0 |
effigies/mne-python | mne/viz/evoked.py | 1 | 12200 | """Functions to make simple plot on evoked M/EEG data (besides topographies)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from itertools import cycle
import numpy as np
from ..io.pick import channel_type
from ..externals.six import string_types
from .utils import _mutable_defaults, _check_delayed_ssp
from .utils import _draw_proj_checkbox, tight_layout
def _plot_evoked(evoked, picks, exclude, unit, show,
ylim, proj, xlim, hline, units,
scalings, titles, axes, plot_type,
cmap=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
scalings, titles, units = _mutable_defaults(('scalings', scalings),
('titles', titles),
('units', units))
channel_types = set(key for d in [scalings, titles, units] for key in d)
channel_types = sorted(channel_types) # to guarantee consistent order
if picks is None:
picks = list(range(evoked.info['nchan']))
bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
if ch in evoked.ch_names]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list)
and all([isinstance(ch, string_types) for ch in exclude])):
exclude = [evoked.ch_names.index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or '
'"bads"')
picks = list(set(picks).difference(exclude))
types = [channel_type(evoked.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
for t in channel_types:
if t in types:
n_channel_types += 1
ch_types_used.append(t)
axes_init = axes # remember if axes where given as input
fig = None
if axes is None:
fig, axes = plt.subplots(n_channel_types, 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if axes_init is not None:
fig = axes[0].get_figure()
if not len(axes) == n_channel_types:
raise ValueError('Number of axes (%g) must match number of channel '
'types (%g)' % (len(axes), n_channel_types))
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
    times = 1e3 * evoked.times  # time in milliseconds
for ax, t in zip(axes, ch_types_used):
ch_unit = units[t]
this_scaling = scalings[t]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
# Parameters for butterfly interactive plots
if plot_type == 'butterfly':
if any([i in bad_ch_idx for i in idx]):
colors = ['k'] * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
ax._get_lines.color_cycle = iter(colors)
else:
ax._get_lines.color_cycle = cycle(['k'])
# Set amplitude scaling
D = this_scaling * evoked.data[idx, :]
# plt.axes(ax)
if plot_type == 'butterfly':
ax.plot(times, D.T)
elif plot_type == 'image':
im = ax.imshow(D, interpolation='nearest', origin='lower',
extent=[times[0], times[-1], 0, D.shape[0]],
aspect='auto', cmap=cmap)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and t in ylim:
if plot_type == 'butterfly':
ax.set_ylim(ylim[t])
elif plot_type == 'image':
im.set_clim(ylim[t])
ax.set_title(titles[t] + ' (%d channel%s)' % (
len(D), 's' if len(D) > 1 else ''))
ax.set_xlabel('time (ms)')
if plot_type == 'butterfly':
ax.set_ylabel('data (%s)' % ch_unit)
elif plot_type == 'image':
ax.set_ylabel('channels (%s)' % 'index')
else:
raise ValueError("plot_type has to be 'butterfly' or 'image'."
"Got %s." % plot_type)
if (plot_type == 'butterfly') and (hline is not None):
for h in hline:
ax.axhline(h, color='r', linestyle='--', linewidth=2)
if axes_init is None:
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
axes=axes, types=types, units=units, scalings=scalings,
unit=unit, ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
if show and plt.get_backend() != 'agg':
plt.show()
fig.canvas.draw() # for axes plots update axes.
tight_layout(fig=fig)
return fig
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, proj=False, xlim='tight', hline=None, units=None,
scalings=None, titles=None, axes=None, plot_type="butterfly"):
"""Plot evoked data
Note: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
        Call pyplot.show() at the end or not.
ylim : dict | None
ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e6])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
hline : list of floats | None
        The values at which to show a horizontal line.
units : dict | None
        The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
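    Example (illustrative; ``evoked`` is assumed to be an instance of Evoked):
        fig = plot_evoked(evoked, exclude='bads', proj=True, hline=[0])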
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=ylim, proj=proj, xlim=xlim,
hline=hline, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="butterfly")
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
clim=None, proj=False, xlim='tight', units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r'):
"""Plot evoked data as images
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
        Call pyplot.show() at the end or not.
clim : dict | None
clim for plots. e.g. clim = dict(eeg=[-200e-6, 200e6])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
        The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim,
hline=None, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="image",
cmap=cmap)
def _plot_update_evoked(params, bools):
""" update the plot evoked lines
"""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
times = evoked.times * 1e3
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
[line.set_data(times, di) for line, di in zip(ax.lines, D)]
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
| bsd-3-clause |
microelly2/geodata | geodat/import_image.py | 1 | 9439 | '''import a image as elevation grid'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- geodat import image to nurbs/pcl
#--
#-- microelly 2016 v 0.2
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
from geodat.say import *
import Points
import sys
if sys.version_info[0] !=2:
from importlib import reload
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os.path
import csv,re
import random
## create the pointcloud, grid or nurbs surface
#
# @param filename image file
# @param n size of a flat border around the area
# @param c color channel (not used)
# @param inverse invert the height
# @param kx scale the size value 1 means 1 pixel is 1 mm
# @param ky
# @param kz
# @param gengrid create a grid of the isocurves
# @param genblock create a solid with the surface as the top face
# @param genpoles create a pointcloud of the poles
# @param pointsonly create only a point cloud of the pixel points
#
# For large images the computation time can be very long,
# so the option to run **pointsonly** first is a good way to inspect the image data.
#
# .
def import_image(filename=None,n=10,c=2,inverse=False,kx=10,ky=10,kz=60,gengrid=True,genblock=False,genpoles=False,pointsonly=False):
'''import_image(filename=None,n=10,c=2,inverse=False,kx=10,ky=10,kz=60,gengrid=True,genblock=False,genpoles=False,pointsonly=False)
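    Example (illustrative sketch; the image path is an assumption and a running FreeCAD session is required):
        import_image('heightmap.png', n=5, kx=10, ky=10, kz=60, pointsonly=True)  # quick point-cloud preview
        import_image('heightmap.png', n=5, kx=10, ky=10, kz=60, gengrid=True)     # full nurbs grid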
'''
display_mathplot=False
dataAsPoles=False
if filename==None:
filename='/home/microelly2/Schreibtisch/test_nurbs3.png'
img = mpimg.imread(filename)
ojn=os.path.basename(filename)
# grey scaled image and color channel select
if len(img.shape) == 2:
lum_img = img
else:
lum_img = img[:,:,c]
lum_img = 0.33*img[:,:,0]+0.33*img[:,:,1]+0.34*img[:,:,2]
(lu,lv)=lum_img.shape
if display_mathplot:
plt.imshow(lum_img)
plt.show()
#create a n points border
uu2=np.zeros((lu+2*n)*(lv+2*n))
uu2=uu2.reshape(lu+2*n,lv+2*n)
uu2[n:lu+n,n:lv+n]=lum_img
lum_img=uu2
(lu,lv)=lum_img.shape
bz=kz+100
if inverse: kz= -kz
pts=[]
uu=[]
for u in range(lu):
ul=[]
for v in range(lv):
# p=FreeCAD.Vector(ky*v,-kx*u,bz-kz*lum_img[u,v])
r=0.001
p=FreeCAD.Vector(ky*v+r*random.random(),-kx*u+r*random.random(),bz-kz*lum_img[u,v] +r*random.random())
ul.append(p)
pts.append(p)
uu.append(ul)
# show the points
p=Points.Points(pts)
Points.show(p)
App.ActiveDocument.ActiveObject.ViewObject.ShapeColor=(1.0,.0,1.0)
App.ActiveDocument.ActiveObject.Label="Points " + str(lv) +" " + str(lu) + " _"
say ((("u, v, points"),u,v,len(pts)))
Gui.updateGui()
if pointsonly: return
tt=Part.BSplineSurface()
if dataAsPoles:
pols=uu
else:
tt.interpolate(uu)
# flatten the border
pols=tt.getPoles()
pols2=np.array(pols)
lup,lvp,la=pols2.shape
zz=bz
bord=n-2
for u in range(lup):
for v in range(bord):
op=pols[u][v]
nup=FreeCAD.Vector(op.x,op.y,zz)
pols[u][v]=nup
op=pols[u][-1-v]
nup=FreeCAD.Vector(op.x,op.y,zz)
pols[u][-1-v]=nup
for u in range(bord):
for v in range(lvp):
op=pols[u][v]
nup=FreeCAD.Vector(op.x,op.y,zz)
pols[u][v]=nup
op=pols[-1-u][v]
nup=FreeCAD.Vector(op.x,op.y,zz)
pols[-1-u][v]=nup
bs=Part.BSplineSurface()
knot_v2=tt.getVKnots()
knot_u2=tt.getUKnots()
mult_u=tt.getUMultiplicities()
mult_v=tt.getVMultiplicities()
ws=tt.getWeights()
bs.buildFromPolesMultsKnots(pols,
mult_u,
mult_v,
knot_u2,
knot_v2,
False,False,3,3,
ws
)
if 1:
sha=bs.toShape()
# show the poles
if genpoles:
pols=bs.getPoles()
ps=[]
for l in pols:
for po in l:
ps.append(po)
p=Points.Points(ps)
Points.show(p)
App.ActiveDocument.ActiveObject.ViewObject.ShapeColor=(1.0,1.0,.0)
App.ActiveDocument.ActiveObject.Label='Poles ' + str(lv) +" " + str(lu) +" _"
Gui.updateGui()
# the nurbs grid
if gengrid:
jj=[]
rr=lv
for i in range(rr+1):
v=bs.vIso((0.0+i)/rr).toShape()
jj.append(v.Edge1)
rr=lu
for i in range(rr+1):
v=bs.uIso((0.0+i)/rr).toShape()
jj.append(v.Edge1)
com=Part.makeCompound(jj)
ttt=App.ActiveDocument.addObject('Part::Feature','Nurbsgrid ' + str(n) + ' ' + ojn)
ttt.ViewObject.DisplayMode = "Wireframe"
# ttt.ViewObject.hide()
ttt.Shape=com
ttt.Placement.Base.z=10
Gui.updateGui()
return
#create the solid
a=FreeCAD.Vector(0,0,-bz)
b=FreeCAD.Vector(0,-kx*(lu-1),-bz)
c=FreeCAD.Vector(ky*(lv-1),-kx*(lu-1),-bz)
d=FreeCAD.Vector(ky*(lv-1),0,-bz)
ad=FreeCAD.Vector(0,0,bz)
bd=FreeCAD.Vector(0,-kx*(lu-1),bz)
cd=FreeCAD.Vector(ky*(lv-1),-kx*(lu-1),bz)
dd=FreeCAD.Vector(ky*(lv-1),0,bz)
# for nonlinear borders - experimental
if 0:
u0e=bs.uIso(0).toShape()
p=Part.makePolygon([ad,a,d,dd],True)
ll=p.Edges+[u0e.Edge1]
f4=Part.makeFilledFace(Part.__sortEdges__(ll))
Part.show(f4)
v0e=bs.vIso(0).toShape()
p=Part.makePolygon([bd,b,a,ad],True)
ll=p.Edges+[v0e.Edge1]
f1=Part.makeFilledFace(Part.__sortEdges__(ll))
#Part.show(v0e)
#Part.show(p)
Part.show(f1)
u1e=bs.uIso(1).toShape()
p=Part.makePolygon([bd,b,c,cd],True)
ll=p.Edges+[u1e.Edge1]
f2=Part.makeFilledFace(Part.__sortEdges__(ll))
# f2=Part.Face(Part.__sortEdges__(ll))
#Part.show(u1e)
#Part.show(p)
Part.show(f2)
v1e=bs.vIso(1).toShape()
p=Part.makePolygon([dd,d,c,cd],True)
ll=p.Edges+[v1e.Edge1]
f3=Part.makeFilledFace(Part.__sortEdges__(ll))
#Part.show(v1e)
#Part.show(p)
Part.show(f3)
p=Part.makePolygon([a,b,c,d], True)
f0=Part.makeFilledFace(p.Edges)
Part.show(f0)
if 1:
nb=App.ActiveDocument.addObject('Part::Spline','Nurbs ' + str(n) + ' ' + ojn)
nb.Shape=sha.Face1
if genblock:
fln=sha.Face1
f0=Part.Face(Part.makePolygon([a,b,c,d], True))
f1=Part.Face(Part.makePolygon([ad,bd,b,a], True))
f2=Part.Face(Part.makePolygon([b,bd,cd,c], True))
f3=Part.Face(Part.makePolygon([cd,dd,d,c], True))
f4=Part.Face(Part.makePolygon([ad,a,d,dd], True))
fls=[f0,f1,f2,f3,f4,fln]
sh=Part.Shell(fls)
sol=Part.Solid(sh)
ttt=App.ActiveDocument.addObject('Part::Feature','Nurbsblock ' + str(n) + ' ' + ojn)
ttt.Shape=sol
# ttt.ViewObject.hide()
print (lu,lv)
return bs
# bs=createNurbsblock('/home/microelly2/Schreibtisch/fisch.jpg',5,0,True,1000,1000,10)
#bs=createNurbsblock('/home/microelly2/Bilder/fcsw.png',5,0,True,10,10,40)
#bs=createNurbsblock('/home/microelly2/Schreibtisch/freeka.png',10,0,1,100,100,400)
# jpeg needs smaller values #+#
#bs=createNurbsblock('/home/microelly2/Schreibtisch/quick61.jpg',10,0,True,100,100,3)
#bs=createNurbsblock('/home/microelly2/Schreibtisch/normanc.jpg',10,0,True,100,100,4)
sdialog='''
#VerticalLayoutTab:
MainWindow:
VerticalLayout:
id:'main'
QtGui.QLabel:
setText:"*** I M A G E T O N U R B S ***"
VerticalLayout:
id:'img1'
# setVisible:False
QtGui.QPushButton:
setText: "Browse for input data filename"
clicked.connect: app.getfn
QtGui.QLineEdit:
setText:"UserAppData/Mod/geodat/testdata/freeka.png"
id: 'bl'
HorizontalLayout:
QtGui.QLabel:
setText:"Scale "
QtGui.QLineEdit:
setText:"10"
id: 'kx'
QtGui.QLineEdit:
setText:"10"
id: 'ky'
QtGui.QLineEdit:
setText:"60"
id: 'kz'
QtGui.QCheckBox:
id: 'inverse'
setText: 'Invert Height'
setChecked: False
QtGui.QLabel:
setText:"Border Size "
QtGui.QLineEdit:
setText:"5"
id: 'border'
QtGui.QLabel:
setText:"Color Channel RGB 012 3-grey noimp "
QtGui.QLineEdit:
setText:"2"
id: 'color'
QtGui.QCheckBox:
id: 'pointsonly'
setText: 'create only a Pointcloud'
setChecked: True
QtGui.QCheckBox:
id: 'gengrid'
setText: 'create Nurbs Grid'
setChecked: True
QtGui.QCheckBox:
id: 'genblock'
setText: 'create Nurbsblock Solid'
setChecked: False
QtGui.QCheckBox:
id: 'genpoles'
setText: 'create Pole Cloud'
setChecked: False
QtGui.QPushButton:
setText: "import image"
id:'run'
clicked.connect: app.run
'''
## the gui backend
class MyApp(object):
def run(self):
'''load the file and create a nurbs surface'''
try:
filename=self.root.ids['bl'].text()
if filename.startswith('UserAppData'):
filename=filename.replace('UserAppData',FreeCAD.ConfigGet("UserAppData"))
ts=time.time()
bs=import_image(
filename,
int(self.root.ids['border'].text()),
int(self.root.ids['color'].text()),
self.root.ids['inverse'].isChecked(),
int(self.root.ids['kx'].text()),
int(self.root.ids['ky'].text()),
int(self.root.ids['kz'].text()),
self.root.ids['gengrid'].isChecked(),
self.root.ids['genblock'].isChecked(),
self.root.ids['genpoles'].isChecked(),
self.root.ids['pointsonly'].isChecked(),
)
te=time.time()
say("load image time " + str(round(te-ts,2)))
except:
sayexc()
def getfn(self):
''' get the filename of the image file'''
fileName = QtGui.QFileDialog.getOpenFileName(None,u"Open File",u"/tmp/");
print(fileName)
self.root.ids['bl'].setText(fileName[0])
## the gui startup
def mydialog(run=True):
app=MyApp()
import geodat
import geodat.miki as miki
reload(miki)
miki=miki.Miki()
miki.app=app
app.root=miki
miki.parse2(sdialog)
miki.run(sdialog)
return miki
## start and hide the gui dialog
def runtest():
m=mydialog()
m.objects[0].hide()
if __name__ == '__main__':
runtest()
def importImage():
mydialog()
| lgpl-3.0 |
buguen/pylayers | pylayers/mobility/agent.py | 1 | 11442 | from SimPy.SimulationRT import Simulation
#from simpy.simulation import *
from pylayers.mobility.transit.Person import Person
from pylayers.mobility.transit.World import world
from pylayers.mobility.transit.SteeringBehavior import Seek, Separation, Containment, InterpenetrationConstraint, queue_steering_mind
import numpy as np
import networkx as nx
import time
import ConfigParser
import pandas as pd
import pylayers.util.pyutil as pyu
from pylayers.network.network import Node, Network
from pylayers.network.communication import Gcom, TX, RX
from pylayers.location.localization import Localization, PLocalization
from pylayers.gis.layout import Layout
from pylayers.util.utilnet import *
#from pylayers.util.pymysqldb import Database
import pdb
""""
.. currentmodule:: pylayers.mobility.agent
.. autosummary::
:toctree: generated/
"""
class Agent(object):
""" Class Agent
Members
-------
args
ID
name
typ
net
epwr
gcom
sim
wstd
sens
dcond
meca : transit.Person
net : pylayers.network.Network
sim :
PN :
rxt
rxr
"""
def __init__(self, **args):
""" Mobile Agent Init
Parameters
----------
'ID': string
agent ID
'name': string
Agent name
'typ': string
            agent typ: 'ag' for a moving agent, 'ap' for a static access point
'pos' : np.array([])
numpy array containing the initial position of the agent
'roomId': int
Room number where the agent is initialized (Layout.Gr)
'meca_updt': float
update time interval for the mechanical process
'loc': bool
enable/disable localization process of the agent
'loc_updt': float
update time interval for localization process
'L': pylayers.gis.Layout()
'net':pylayers.network.Network(),
'wstd': list of string
list of used radio access techology of the agent
'world': transit.world()
Soon deprecated
'save': list of string
list of save method ( soon deprecated)
'sim':Simpy.SimulationRT.Simulation(),
        'epwr': dictionary
            dictionary of emitted power of the transmitter {'wstd#': epwr value}
        'sens': dictionary
            dictionary of sensitivity of the receiver {'wstd#': sens value}
        'dcond': dictionary
Not used yet
'gcom':pylayers.communication.Gcom()
Communication graph
'comm_mod': string
Communication between nodes mode:
                'autonomous': all TOAs are refreshed regularly
                'synchro' : only visible TOAs are refreshed
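        Example (illustrative sketch; the argument values are assumptions and the remaining defaults above are used):
            ag = Agent(ID='1', name='alice', typ='ag', roomId=0, wstd=['rat1'])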
"""
defaults = {'ID': '0',
'name': 'johndoe',
'typ': 'ag',
'color': 'k',
'pdshow': False,
'pos': np.array([]),
'roomId': -1,
'froom': [],
'wait': [],
'seed': 0,
'cdest': 'random',
'meca_updt': 0.1,
'loc': False,
'loc_updt': 0.5,
'loc_method': ['geo'],
'L': Layout(),
'network': True,
'net': Network(),
'wstd': ['rat1'],
'world': world(),
'save': [],
'sim': Simulation(),
'epwr': {},
'sens': {},
'dcond': {},
'gcom': Gcom(),
'comm_mode': 'autonomous'}
for key, value in defaults.items():
if key not in args:
args[key] = value
self.args = args
self.ID = args['ID']
self.name = args['name']
self.typ = args['typ']
# Create Network
self.net = args['net']
self.epwr = args['epwr']
self.gcom = args['gcom']
self.sim = args['sim']
self.wstd = args['wstd']
if args['epwr'] == {}:
self.epwr = {x: 0 for x in self.wstd}
else:
self.epwr = args['epwr']
if args['sens'] == {}:
self.sens = {x: -180 for x in self.wstd}
else:
self.sens = args['sens']
try:
self.dcond = args['dcond']
except:
pass
# check if node id already given
if self.ID in self.net.nodes():
raise NameError(
                'another agent has the ID: ' + self.ID + '. Please use another ID')
if self.typ == 'ag':
# mechanical init
self.meca = Person(ID=self.ID,
color=args['color'],
pdshow=args['pdshow'],
roomId=args['roomId'],
L=args['L'],
net=self.net,
interval=args['meca_updt'],
wld=args['world'],
sim=args['sim'],
seed=args['seed'],
moving=True,
froom=args['froom'],
wait=args['wait'],
cdest=args['cdest'],
save=args['save']
)
self.meca.behaviors = [Seek(), Containment(),
Separation(), InterpenetrationConstraint()]
self.meca.steering_mind = queue_steering_mind
# Network init
self.node = Node(ID=self.ID,name=self.name, p=conv_vecarr(self.meca.position),
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim.activate(self.meca, self.meca.move(), 0.0)
self.PN = self.net.node[self.ID]['PN']
# Communication init
if args['comm_mode'] == 'synchro' and args['network']:
# The TOA requests are made every refreshTOA time ( can be modified in agent.ini)
# This Mode will be deprecated in future version
self.rxr = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.rxt = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.rxt, self.rxt.refresh_TOA(), 0.0)
elif args['comm_mode'] == 'autonomous' and args['network']:
# The requests are made by node only when they are in
# visibility of pairs.
# self.rxr only manage a refresh RSS process
self.rxr = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
# self.tx manage all requests to other nodes
self.tx = TX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
# self.tx replies to requests from self.tx
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.tx, self.tx.request(), 0.0)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
elif self.typ == 'ap':
if args['roomId'] == -1:
self.node = Node(ID=self.ID, p=self.args['pos'],
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
else:
pp = np.array(args['L'].Gr.pos[self.args['roomId']])
self.node = Node(
ID=self.ID, p=pp, t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim = args['sim']
self.PN = self.net.node[self.ID]['PN']
self.PN.node[self.ID]['pe'] = self.net.node[self.ID]['p']
if args['comm_mode'] == 'autonomous' and args['network']:
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
p = self.args['pos']
self.posdf = pd.DataFrame(
{'t': pd.Timestamp(0), 'x': p[0], 'y': p[1], 'z': p[2],
'vx': np.array([0.0]), 'vy': np.array([0.0]),
'ax': np.array([0.0]), 'ay': np.array([0.0]),
}, columns=['t', 'x', 'y', 'z', 'vx', 'vy', 'ax', 'ay'], index=np.array([0]))
else:
raise NameError(
                'wrong agent typ, it must be either agent (ag) or access point (ap) ')
if self.typ == 'ap':
self.MoA = 1
else:
self.MoA = 0
if 'mysql' in args['save']:
config = ConfigParser.ConfigParser()
config.read(pyu.getlong('simulnet.ini', 'ini'))
sql_opt = dict(config.items('Mysql'))
db = Database(sql_opt['host'], sql_opt['user'],
sql_opt['passwd'], sql_opt['dbname'])
db.writenode(self.ID, self.name, self.MoA)
if 'txt' in args['save']:
pyu.writenode(self)
if self.typ != 'ap' and args['loc']:
self.loc = Localization(net=self.net, ID=self.ID,
method=args['loc_method'])
self.Ploc = PLocalization(loc=self.loc,
loc_updt_time=args['loc_updt'],
tx=self.tx,
sim=args['sim'])
self.sim.activate(self.Ploc, self.Ploc.run(), 1.5)
def __repr__(self):
s = 'General Agent info \n********************\n'
s = s + 'name : ' + self.name + '\n'
s = s + 'ID: ' + self.ID + '\n'
s = s + 'typ: ' + self.typ
s = s + '\n\n More Agent information about:'
        s = s + '\n+ Mechanical => self.meca'
s = s + '\n+ Network => self.net'
        s = s + '\n+ Personal Network => self.PN'
s = s + '\n+ Localization => self.loc\n\n'
try:
s = s + self.PN.__repr__() + '\n\n'
except:
s = s + 'No network simulated'
if self.typ != 'ap':
s = s + self.meca.__repr__() + '\n\n'
try:
s = s + self.loc.__repr__() + '\n\n'
except:
s = s + 'no localization simulated'
return s
| lgpl-3.0 |
ghchinoy/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ilyes14/scikit-learn | sklearn/linear_model/least_angle.py | 61 | 54324 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
    .. [1] "Least Angle Regression", Efron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
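            # Concretely (a restatement of the diagram above): with Xa the
            # current active columns, the new row w solves the triangular
            # system L w = Xa' x_j (the solve_triangular call below), and the
            # new diagonal entry is z = sqrt(||x_j||^2 - ||w||^2), floored by
            # `eps` for numerical stability.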
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
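# A minimal usage sketch (illustrative only; the data below is synthetic and
# the helper name is not part of the library): lars_path returns the grid of
# alphas, the active set, and the coefficient path.
def _demo_lars_path():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.01 * rng.randn(30)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # coefs has shape (n_features, n_alphas + 1); column j holds the
    # coefficients at regularization alphas[j].
    return alphas, active, coefs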
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
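# Hedged usage sketch (synthetic data, helper name not part of the library):
# LarsCV fits a LARS path per CV fold and keeps the alpha minimizing the
# averaged left-out squared error.
def _demo_lars_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - X[:, 2] + 0.1 * rng.randn(60)
    model = LarsCV(cv=3).fit(X, y)
    # model.alpha_ is the selected regularization strength; model.cv_alphas_
    # and model.cv_mse_path_ give the per-alpha cross-validation error curve.
    return model.alpha_, model.coef_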
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
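# Illustrative sketch (synthetic data, helper name not part of the library):
# criterion_ holds n_samples * log(MSE_k) + K * df_k for every point k on the
# path, with K = 2 for AIC and K = log(n_samples) for BIC; alpha_ is the alpha
# at its argmin.
def _demo_lasso_lars_ic():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 6)
    y = X[:, 0] + 0.1 * rng.randn(50)
    model = LassoLarsIC(criterion='bic').fit(X, y)
    return model.alpha_, model.criterion_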
| bsd-3-clause |
rustychris/stompy | stompy/model/suntans/sunreader.py | 1 | 147480 | from __future__ import print_function
from __future__ import division
# These were added by 2to3, but seem to cause more issues than they solve
# from builtins import zip
# from builtins import str
# from builtins import map
# from builtins import range
# from builtins import object
# from past.utils import old_div
import re
import pprint
import sys, os, shutil, time
import hashlib, pickle
try:
try:
from osgeo import osr
except:
import osr
except ImportError:
print("GDAL unavailable")
    osr = "unavailable"
from numpy import ma
import glob,datetime
import subprocess
from . import transect
from ...grid import (trigrid,orthomaker)
from . import forcing
from ...spatial import field
import mmap
from numpy import *
from ...spatial.linestring_utils import upsample_linearring
try:
import pytz
utc = pytz.timezone('utc')
except ImportError:
print("couldn't load utc timezone")
utc = None
try:
from safe_pylab import *
import safe_pylab as pylab
except:
print("Plotting disabled")
from numpy.random import random
# configurable, per-user or host settings:
import local_config
# use absolute paths since we need to change directories to invoke vdatum
REALSIZE = 8
REALTYPE = float64
def read_bathymetry_offset():
## Figure out the constant offset in the bathymetry from NAVD88
import sys
comp_path = os.path.join( os.environ['HOME'], "classes/research/spatialdata/us/ca/suntans/bathymetry/compiled")
if comp_path not in sys.path:
sys.path.append(comp_path)
import depth_offset
bathymetry_offset = depth_offset.navd88_highwater
#print "Bathymetry has constant offset of %gm"%bathymetry_offset
return bathymetry_offset
def msl_to_navd88(lonlat_locs):
if msl_to_navd88.warning:
print(msl_to_navd88.warning)
msl_to_navd88.warning = None
return 0.938*ones( (lonlat_locs.shape[0],) )
# return apply_vdatum('LMSL','MLLW',lonlat_locs)
def apply_vdatum(src_vdatum,dest_vdatum,lonlat_locs):
""" given a vector of lon/lat yx pairs, return the height that must be added to MSL to get
a NAVD88 measurement
"""
tmp_in = "/tmp/tmpinput"
tmp_out = "/tmp/tmpoutput"
in_fp = open(tmp_in,'wt')
for i in range(lonlat_locs.shape[0]):
in_fp.write("%d, %f, %f, 0.0\n"%(i,lonlat_locs[i,0],lonlat_locs[i,1]))
in_fp.close()
vdatum_dir = local_config.vdatum_dir
command = "cd %s ; java VDatum -lonlat -heights -hin WGS84 -units meters -vin %s -vout %s %s %s"%(
vdatum_dir,
src_vdatum, dest_vdatum,
tmp_in,tmp_out)
# print "About to run vdatum command:"
# print command
res = subprocess.call(command,shell=True)
if res:
print("Something probably went wrong there. Returned ",res)
raise Exception("Subcommand vdatum failed")
offsets = zeros( (lonlat_locs.shape[0]), float64)
fp_out = open(tmp_out,'rt')
for i in range(lonlat_locs.shape[0]):
stupid_id,x,y,z = [float(s) for s in fp_out.readline().split(',')]
offsets[i] = z
fp_out.close()
return offsets
msl_to_navd88.warning = "WARNING: assuming constant 0.938m MSL-NAVD88"
import subprocess
import socket
class MPIrunner(object):
""" Try to figure out if we're running on mpich1 or mpich2
"""
def __init__(self,cmdlist,np=4,no_mpi_for_1=True,wait=1):
""" np: number of processors
        no_mpi_for_1: if np==1 and this is true, run the command
        directly without any mpi
        Now tries to figure out if we're on bobstar, in which case jobs
        are submitted via qsub
"""
mpi_version = None
if np == 1 and no_mpi_for_1:
mpi_version = 0
host = socket.gethostname()
# bobstar? (or other qsub based machine)
if mpi_version is None:
if host == 'head0.localdomain':
print("good luck - we're running on bobstar")
mpi_version = 'bobstar'
elif host.startswith('eddy.'):
print("looks like we're on eddy")
mpi_version = 'eddy'
elif host == 'sunfish':
print("Hi sunfish!")
mpi_version='sunfish'
elif host.startswith('trestles'):
print("hi trestles.")
# seems that trestles can run qsub from nodes
# now, so no need to test whether we're in
# a qsub job. (but this does not handle the
# case where the script is running inside
# a qsub job that already has the right np,
# and really just want to run mpirun_rsh right away..)
mpi_version = 'trestles'
# running locally on mac:
if mpi_version is None and sys.platform == 'darwin':
mpi_version = 'darwin'
# look for some direct ways to start it:
if mpi_version is None:
print("Trying versions of mpich")
for cmd,version in [('mpich2version',2),
('mpich2version',1),
('mpichversion.mpich-shmem',1)]:
try:
pipe = subprocess.Popen(cmd,stdout=subprocess.PIPE)
version_info = pipe.communicate()[0]
mpi_version = version
break
except OSError as exc:
#print "While trying to figure out the mpich version, the command %s"%cmd
#print "raised an exception"
#print exc
pass
# might be open mpi -
if mpi_version is None:
print("Trying Open MPI")
try:
pipe = subprocess.Popen(['mpirun','-V'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout_txt,stderr_txt = pipe.communicate()
if b"Open MPI" in stderr_txt + stdout_txt:
print("Found mpi is openmpi")
mpi_version="openmpi"
else:
print("OpenMPI mpirun printed something else")
print(stderr_txt)
print(stdout_txt)
except OSError as exc:
print("While checking for Open MPI, the command %s"%cmd)
print("raised an exception")
print(exc)
if mpi_version is None:
raise Exception("MPI wasn't found")
print("Using mpi version %s"%mpi_version)
print(" PWD: ",os.getcwd())
print(" np: ",np)
print(" command: "," ".join(cmdlist))
def must_wait():
if not wait:
print("Sorry - have to wait on non-qsub platforms")
if mpi_version == 0:
must_wait()
subprocess.call(cmdlist)
elif mpi_version == 1:
must_wait()
if host=='marvin':
mpirun = "mpirun.mpich-shmem"
else:
mpirun = "mpirun"
subprocess.call([mpirun,'-np','%d'%np] + cmdlist)
elif mpi_version == 2:
must_wait()
subprocess.call(['mpiexec','-np','%d'%np] + cmdlist)
elif mpi_version == 'darwin':
must_wait()
subprocess.call(['mpiexec','-np','%d'%np,'-host','localhost'] + cmdlist)
elif mpi_version =='bobstar':
self.run_bobstar(np,cmdlist,wait=wait)
elif mpi_version == 'eddy':
self.run_eddy(np,cmdlist,wait=wait)
elif mpi_version in ['trestles']:
self.run_trestles(np,cmdlist,wait=wait,mpi_version=mpi_version)
elif mpi_version=='sunfish':
self.run_sunfish(np,cmdlist,wait=wait)
elif mpi_version=='openmpi':
must_wait()
print("Running via openmpi")
subprocess.call(['mpirun','-n','%d'%np] + cmdlist)
else:
raise Exception("Failed to find a way to run this! mpi version is %s"%mpi_version)
def run_bobstar(self,np,cmdlist,wait=True):
self.run_qsub(np,cmdlist,
ppn=8,walltime=None,
wait=wait)
def run_sunfish(self,np,cmdlist,wait=True):
self.run_qsub(np,cmdlist,ppn=8,walltime="24:00:00",wait=wait,
mpi_template="mpirun -np %(np)i",)
def run_eddy(self,np,cmdlist,wait=True):
if 0: # the more standard but old way on eddy:
# eddy policy is 24 hour jobs
self.run_qsub(np,cmdlist,
ppn=8,walltime='24:00:00',
mpi_template='/usr/bin/mpirun -np %(np)i',
wait=wait)
else:
self.run_qsub(np,cmdlist,
ppn=8,walltime='24:00:00',
mpi_template='/home/rusty/bin/mpiexec --comm pmi -n %(np)i',
wait=wait)
def run_trestles(self,np,cmdlist,wait=True,mpi_version='trestles'):
""" trestles has 32 cores per node, but they also have a shared queue
which allows for using a smaller number of cores (and getting charged
for the smaller number). it defaults to 48 hours, but I think that
it charges for all of walltime, even if the job finishes early (annoying).
for testing, force walltime = 1.0
"""
if np > 1:
mpi_template="mpirun_rsh -np %(np)i -hostfile $PBS_NODEFILE"
else:
# sometimes run serial jobs via qsub to have more resources
mpi_template=""
if np < 32:
# use one node from the shared queue
q = 'shared'
ppn = np
else:
q = 'normal'
ppn = 32
# just guess if not given...
walltime=os.environ.get('WALLTIME','1:00:00')
self.run_qsub(np,cmdlist,
ppn=ppn,walltime=walltime,save_script=False,
queue=q,
mpi_template=mpi_template, # "mpirun_rsh -np %(np)i -hostfile $PBS_NODEFILE",
wait=wait)
def run_qsub(self,np,cmdlist,ppn=8,walltime=None,save_script=False,
queue=None,
mpi_template="mpirun -np %(np)i -machine vapi",
wait=True):
""" Simple wrapper for qsub -
note: some systems have a very small default walltime but allow for
arbitrarily large walltime, while others have a generous default but
don't allow anything bigger.
save_script: if true, write the script out to qsub_script.sh, and exit.
        wait: if False, queue the job and return immediately; otherwise poll qstat until it finishes.
"""
print("submitting job: walltime will be: ",walltime)
# first figure out how many nodes:
nnodes = int( (np-1) / ppn + 1 )
if walltime is None:
walltime = ""
else:
walltime = ",walltime=%s"%walltime
if queue is not None:
qtxt ="#PBS -q %s\n"%queue
else:
qtxt ="# no queue specified"
mpi_command = mpi_template%locals()
script = """#!/bin/bash
%s
#PBS -N rusty
#PBS -l nodes=%i:ppn=%i%s
#PBS -V
DISPLAY=""
export DISPLAY
cd %s
echo Run started at `date`
time %s %s
echo Run ended at `date`
"""%(qtxt,nnodes,ppn,walltime,os.getcwd(),mpi_command," ".join(cmdlist))
if save_script:
fp = open("qsub_script.sh","wt")
fp.write(script)
fp.close()
sys.exit(1)
print("------Script------")
print(script)
print("------------------")
# it would be nice to run it interactively and just wait, but
# qsub says we have to be on a terminal to do that (and I think
# that interactive jobs also don't run the script).
# so capture the job number from qsub:
# 9552.scyld.localdomain
proc = subprocess.Popen(['qsub'],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,err = proc.communicate( script )
job_id = out.strip()
print("qsub returned with code %s. job id is '%s'"%(proc.returncode,job_id))
if job_id=="":
print("Didn't get a job id back from qsub.")
print("stdout was:")
print(out)
print("stderr was:")
print(err)
return
if not wait:
print("Not waiting. job id is %s"%job_id)
return
# poll the job via qstat
# increase sleep time as we go
sleep_time = 5.0
sleep_r = 1.2
sleep_time_max = 600
last_status = None
while 1:
proc = subprocess.Popen(['qstat',job_id],stdout=subprocess.PIPE)
out,err = proc.communicate()
print("qstat returned: ",out)
lines = out.split("\n")
if len(lines)>=3:
# print "Job status line is ",lines[2]
stat_line = lines[2]
status = stat_line.split()[4]
print('status is ',status)
else:
print("Assuming that no response means that it's finished")
status = 'C'
if status == 'C':
print("Completed!")
break
else:
if last_status == status:
sleep_time = min( sleep_r * sleep_time, sleep_time_max)
else:
sleep_time = 5.0
last_status = status
print("sleeping %f seconds"%sleep_time)
time.sleep( sleep_time )
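# Hypothetical usage sketch (the command list and flags below are made up, not
# taken from this module): something like
#   MPIrunner(['./sun', '--some-flag'], np=8)
# picks plain execution, mpich, Open MPI or a qsub submission based on the
# host, and with wait=False on a qsub host it returns as soon as the job is
# queued instead of polling qstat.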
class SuntansCrash(object):
horizontal_courant = 1
vertical_courant = 2
def site(self):
g = self.sun.grid(processor = self.processor)
if self.description == SuntansCrash.vertical_courant:
return g.vcenters()[self.cell_id]
else:
endpoints = g.points[g.edges[self.edge_id,:2],:2]
return endpoints.mean(axis=0) # compute midpoint
def plot(self):
# plot the location of the cell on top of the
# bathymetry, and with a second plot showing the
# stage of the nearest profile point
# plotting a crash site
self.sun.plot_bathymetry(procs = [self.processor] )
site = self.site()
plot( [site[0]],[site[1]],'ro' )
annotate('Offender',site)
def closest_profile_point(self):
site = self.site()
pnts = self.sun.profile_points()
dists = sqrt( (pnts[:,0]-site[0])**2 + (pnts[:,1]-site[1])**2 )
best = argmin(dists)
print("Closest profile point is %g away"%dists[best])
self.closest_profile_location = pnts[best]
self.closest_profile_index = best
return best
def plot_nearest_freesurface(self):
prof_point = self.closest_profile_point()
prof_data = self.sun.profile_data('FreeSurfaceFile')
point_data = prof_data[:,prof_point]
prof_time = self.sun.timeline(units='days',output='profile')
plot(prof_time,point_data)
def full_plot(self):
clf()
subplot(212)
self.plot_nearest_freesurface()
subplot(211)
self.plot_vicinity()
def plot_vicinity(self):
self.plot()
self.closest_profile_point()
p = self.closest_profile_location
plot( [p[0]],[p[1]],'bo')
annotate('Profile loc',p)
def depth_info(self):
if self.description == self.horizontal_courant:
print("Horizontal courant number violation")
print("edge_id: ",self.edge_id)
g = self.sun.grid(processor = self.processor)
cells = g.edges[self.edge_id,3:5]
elif self.description == self.vertical_courant:
print("Vertical courant number violation")
print("cell_id: ",self.cell_id)
print("dz: ",self.dz)
cells = [self.cell_id]
else:
print("Unknown crash type ",self.description)
print("Z-level of crash: ",self.z_id)
fs = self.storefile().freesurface()
for c in cells:
print("Cell %i"%c)
print("Depth[cell=%i]: %g"%(c,self.sun.depth_for_cell(c,self.processor)))
if self.z_id == 0:
print("Top of this z-level: 0.0")
else:
print("Top of this z_level: %g"%(self.sun.z_levels()[self.z_id-1]))
print("Bottom of this z_level: %g"%(self.sun.z_levels()[self.z_id]))
print("Freesurface[cell=%i]: %g"%(c,fs[c]))
_storefile = None
def storefile(self):
if self._storefile is None:
self._storefile = StoreFile(self.sun, self.processor)
return self._storefile
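
# Illustrative sketch (assumption, not original code): how a populated
# SuntansCrash instance might be inspected. The attributes it relies on
# (sun, processor, description, cell_id/edge_id, z_id, dz) are presumably
# filled in elsewhere from a SUNTANS crash dump; only the read-only
# diagnostic calls are shown here.
def _example_inspect_crash(crash):
    crash.depth_info()   # textual summary of the offending cell/edge and z-level
    crash.full_plot()    # bathymetry plus freesurface at the nearest profile point
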
def spin_mapper(spin_sun,target_sun,target_absdays,scalar='salinity',nonnegative=1):
""" return a function that takes a proc, cell and k-level from target_sun
and returns a scalar value from the spin_sun.
the spinup timestep is chosen to be as close as possible to target_absdays
(typically the starting time of target_sun)
"""
### figure out the step to use:
# the time of this storefile:
spin_times = date2num( spin_sun.time_zero()) + spin_sun.timeline(units='days')
step = argmin( abs(spin_times-target_absdays) )
print("Difference between target time and spinup time: %gs"%( 24*3600*(target_absdays - spin_times[step]) ))
### map target z-levels to the spin_sun z-levels
z_levels = target_sun.z_levels()
spin_z_levels = spin_sun.z_levels()
# could be refined some, to use middles of the cells
k_to_spin_k = searchsorted(spin_z_levels,z_levels)
### as needed, figure out how our cells map onto the
# processors and cells of the spin_run. Since we
# may not need all of the spin_sun's domains,
# do this dynamically
proc_cell_to_spin_cell = {} # (target_proc,target_cell) -> (spin_proc,spin_cell_id)
cell_salinity = {} # spin proc -> salt[spin_c,spin_k]
grids = [None] * target_sun.num_processors()
spin_ctops = [None] * spin_sun.num_processors()
def mapper(p,c,k):
# populate per processor things as needed
if grids[p] is None:
grids[p] = target_sun.grid(p)
# first figure out the spin-cell
if (p,c) not in proc_cell_to_spin_cell:
# The real way:
proc_cell_to_spin_cell[(p,c)] = spin_sun.closest_cell(grids[p].vcenters()[c])
spin_proc,spin_c = proc_cell_to_spin_cell[(p,c)]
# map my k onto the spinup-k:
try:
spin_k = k_to_spin_k[k]
except:
print("Ran into trouble while trying to map p=%d c=%d k=%d"%(p,c,k))
print("through the map ")
print(k_to_spin_k)
raise
# get the ctop values for the spinup run:
if spin_ctops[spin_proc] is None:
spin_ctops[spin_proc] = spin_sun.ctop(spin_proc,step)
# print "Read ctops for spinup: spin_proc=%d length=%s\n"%(spin_proc,spin_ctops[spin_proc].shape)
# how deep is that cell?
spin_nk = spin_sun.Nk(spin_proc)[spin_c]
# if the spinup cell is dry, pull from the highest wet cell
try:
if spin_k < spin_ctops[spin_proc][spin_c]:
spin_k = spin_ctops[spin_proc][spin_c]
except IndexError:
print("Trouble with production run (%d,%d,%d) mapping to spinup (%d,%d,??) shaped %s"%(p,c,k,spin_proc,spin_c,spin_ctops[spin_proc].shape))
raise
# if it's too deep, truncate
if spin_k >= spin_nk:
spin_k = spin_nk - 1
if spin_proc not in cell_salinity:
g,cell_salinity[spin_proc] = spin_sun.cell_salinity(spin_proc,step)
s = cell_salinity[spin_proc][spin_c,spin_k]
if nonnegative and s<0.0:
s=0.0
return s
return mapper
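
# Illustrative sketch (assumption): initializing the salinity of a new run from
# a spun-up run by combining spin_mapper() with StoreFile.overwrite_salinity().
# Both sun objects are SunReader-like instances; target_absdays is a matplotlib
# datenum for the target run's start time.
def _example_spinup_salinity(spin_sun, target_sun, target_absdays):
    mapper = spin_mapper(spin_sun, target_sun, target_absdays, scalar='salinity')
    for proc in range(target_sun.num_processors()):
        sf = StoreFile(target_sun, proc)
        sf.overwrite_salinity(mapper)  # mapper is called as mapper(proc, cell, k)
        sf.close()
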
class SediStoreFile(object):
""" Like StoreFile, but for reading/writing sediment restart data
"""
hdr_bytes=3*4
@staticmethod
def filename(sun,processor,startfile=0):
# this is currently hardcoded in sedi.c and phys.c - so we have to hardcode here.
if startfile:
return os.path.join(sun.datadir,'start_sedi.dat.%d'%processor)
else:
return os.path.join(sun.datadir,'store_sedi.dat.%d'%processor)
def __init__(self,sun,processor,startfile=0):
self.sun = sun
self.proc = processor
self.grid = sun.grid(processor)
self.fn = self.filename(sun,processor,startfile)
self.fp = open(self.fn,'rb+')
# pre-compute strides
self.stride_Nc = self.grid.Ncells()
self.stride_Nc_Nk = self.sun.Nk(self.proc).sum()
# read the header info:
header = fromstring(self.fp.read(self.hdr_bytes),int32)
self.timestep = header[0]
self.nclasses = header[1]
self.nlayers = header[2]
def seek_ssc(self,species):
self.fp.seek(self.hdr_bytes + species* self.stride_Nc_Nk * REALSIZE)
def seek_bed(self,layer):
self.fp.seek(self.hdr_bytes + self.nclasses*self.stride_Nc_Nk * REALSIZE \
+ layer * self.stride_Nc * REALSIZE)  # bed-layer offset is in bytes as well
def read_ssc(self,species):
self.seek_ssc(species)
return fromstring( self.fp.read(self.stride_Nc_Nk * REALSIZE), REALTYPE)
def read_bed(self,layer):
self.seek_bed(layer)
return fromstring( self.fp.read(self.stride_Nc * REALSIZE), REALTYPE)
def write_ssc(self,species,ssc):
data = ssc.astype(REALTYPE)
self.seek_ssc(species)
self.fp.write( data.tostring() )
self.fp.flush()
def write_bed(self,layer,bed):
data = bed.astype(REALTYPE)
self.seek_bed(layer)
self.fp.write( data.tostring() )
self.fp.flush()
def overwrite_ssc(self,func,species):
""" iterate over all the cells and set the ssc
in each one, overwriting the existing ssc data
the function is called as func(proc, cell id, k-level)
"""
# read the current data:
ssc = self.read_ssc(species)
i = 0 # linear index into data
Nk = self.sun.Nk(self.proc)
for c in range(self.grid.Ncells()):
for k in range(Nk[c]):
ssc[i] = func(self.proc,c,k)
i+=1
self.write_ssc(species,ssc)
def overwrite_bed(self,func,layer):
bed = self.read_bed(layer)
for c in range(self.grid.Ncells()):
bed[c] = func(self.proc,c,layer)
self.write_bed(layer,bed)
def close(self):
self.fp.close()
self.fp = None
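
# Illustrative sketch (assumption): zeroing the suspended sediment concentration
# of one species in every processor's sediment restart file. `sun` is a
# SunReader-like instance as used elsewhere in this module.
def _example_reset_ssc(sun, species=0):
    for proc in range(sun.num_processors()):
        ssf = SediStoreFile(sun, proc)
        ssf.overwrite_ssc(lambda p, c, k: 0.0, species)
        ssf.close()
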
class StoreFile(object):
""" Encapsulates reading of store.dat files, either for restarts or
for crashes
New: support for overwriting portions of a storefile
"""
def __init__(self,sun,processor,startfile=0,filename=None):
""" startfile: if true, choose filename using StartFile
instead of StoreFile
"""
self.sun = sun
self.proc = processor
if filename is None:
if startfile:
self.fn = self.sun.file_path('StartFile',self.proc)
else:
self.fn = self.sun.file_path('StoreFile',self.proc)
else:
self.fn=filename
self.fp = open(self.fn,'rb+')
# lazy loading of the strides, in case we just want the
# timestep
self.blocks_initialized = False
def initialize_blocks(self):
if not self.blocks_initialized:
# all data is lazy-loaded
self.grid = self.sun.grid(self.proc)
# pre-compute strides
Nc = self.grid.Ncells()
Ne_Nke = self.sun.Nke(self.proc).sum()
Nc_Nk = self.sun.Nk(self.proc).sum()
Nc_Nkp1 = (self.sun.Nk(self.proc) + 1).sum()
# and define the structure of the file:
blocks = [
['timestep', int32, 1],
['freesurface', REALTYPE, Nc],
['ab_hor_moment', REALTYPE, Ne_Nke],
['ab_vert_moment', REALTYPE, Nc_Nk],
['ab_salinity', REALTYPE, Nc_Nk],
['ab_temperature', REALTYPE, Nc_Nk],
['ab_turb_q', REALTYPE, Nc_Nk],
['ab_turb_l', REALTYPE, Nc_Nk],
['turb_q', REALTYPE, Nc_Nk],
['turb_l', REALTYPE, Nc_Nk],
['nu_t', REALTYPE, Nc_Nk],
['K_t', REALTYPE, Nc_Nk],
['u', REALTYPE, Ne_Nke],
['w', REALTYPE, Nc_Nkp1],
['p_nonhydro', REALTYPE, Nc_Nk],
['salinity', REALTYPE, Nc_Nk],
['temperature', REALTYPE, Nc_Nk],
['bg_salinity', REALTYPE, Nc_Nk]]
# and then rearrange to get block offsets and sizes ready for reading
block_names = [b[0] for b in blocks]
block_sizes = array( [ ones(1,b[1]).itemsize * b[2] for b in blocks] )
block_offsets = block_sizes.cumsum() - block_sizes
expected_filesize = block_sizes.sum()
actual_filesize = os.stat(self.fn).st_size
if expected_filesize != actual_filesize:
raise Exception("Mismatch in filesize: %s != %s"%(expected_filesize, actual_filesize))
self.block_names = block_names
self.block_sizes = block_sizes
self.block_offsets = block_offsets
self.block_types = [b[1] for b in blocks]
self.blocks_initialized = True
def close(self):
self.fp.close()
self.fp = None
def read_block(self,label):
# special handling for timestep - lets us skip having to initialize
# the full block layout
if label == 'timestep':
self.fp.seek(0)
s = self.fp.read( 4 )
return fromstring( s, int32 )
else:
# 2014/7/13: this line was missing - not sure how it ever
# worked - or if this is incomplete code??
self.initialize_blocks()
i = self.block_names.index(label)
self.fp.seek( self.block_offsets[i] )
s = self.fp.read( self.block_sizes[i] )
return fromstring( s, self.block_types[i] )
def write_block(self,label,data):
i = self.block_names.index(label)
self.fp.seek( self.block_offsets[i] )
data = data.astype(self.block_types[i])
self.fp.write( data.tostring() )
self.fp.flush()
def timestep(self):
return self.read_block('timestep')[0]
def time(self):
""" return a datetime corresponding to our timestep """
return self.sun.time_zero() + datetime.timedelta( self.timestep() * self.sun.conf_float('dt')/(24.*3600) )
def freesurface(self):
return self.read_block('freesurface')
def u(self):
blk = self.read_block('u')
Nke = self.sun.Nke(self.proc)
Nke_cumul = Nke.cumsum() - Nke
full_u = nan*ones( (self.grid.Nedges(),Nke.max()) )
for e in range(self.grid.Nedges()):
full_u[e,0:Nke[e]] = blk[Nke_cumul[e]:Nke_cumul[e]+Nke[e]]
return full_u
def overwrite_salinity(self,func):
""" iterate over all the cells and set the salinity
in each one, overwriting the existing salinity data
the function is called as func(proc, cell id, k-level)
"""
# read the current data:
salt = self.read_block('salinity')
# for starters, just call out to func once for each
# cell, but pass it the cell id so that func can
# cache locations for each grid cell.
i = 0 # linear index into data
Nk = self.sun.Nk(self.proc)
for c in range(self.grid.Ncells()):
for k in range(Nk[c]):
salt[i] = func(self.proc,c,k)
i+=1
self.write_block('salinity',salt)
def overwrite_temperature(self,func):
""" iterate over all the cells and set the temperature
in each one, overwriting the existing temperature data
signature for the function func(proc, cell id, k-level)
"""
# read the current data:
temp = self.read_block('temperature')
# for starters, just call out to func once for each
# cell, but pass it the cell id so that func can
# cache locations for each grid cell.
i = 0 # linear index into data
Nk = self.sun.Nk(self.proc)
for c in range(self.grid.Ncells()):
for k in range(Nk[c]):
temp[i] = func(self.proc,c,k)
i+=1
self.write_block('temperature',temp)
def copy_salinity(self,spin_sun):
""" Overwrite the salinity record with a salinity record
taken from the spin_sun run. The spinup output step closest
to the time of this storefile is used.
"""
### figure out the step to use:
# the time of this storefile:
my_absdays = date2num( self.time() )
mapper = spin_mapper(spin_sun,
self.sun,
my_absdays,scalar='salinity')
self.overwrite_salinity(mapper)
# the rest of the fields still need to be implemented, but the pieces are mostly
# here. see SunReader.read_cell_z_level_scalar() for a good start, and maybe
# there is a nice way to refactor this and that.
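
# Illustrative sketch (assumption): peeking at a store.dat restart file.
# Reading the timestep alone avoids initializing the full block layout
# (see StoreFile.read_block()); freesurface() triggers the lazy setup.
def _example_storefile_summary(sun, proc=0):
    sf = StoreFile(sun, proc)
    print("restart written at step %d (%s)" % (sf.timestep(), sf.time()))
    eta = sf.freesurface()  # one value per cell on this processor
    print("freesurface range: %g to %g" % (eta.min(), eta.max()))
    sf.close()
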
class GenericConfig(object):
""" Handles reading and writing of suntans.dat formatted files.
"""
def __init__(self,filename=None,text=None):
""" filename: path to file to open and parse
text: a string containing the entire file to parse
"""
self.filename = filename
if filename:
fp = open(filename,'rt')
else:
fp = [s+"\n" for s in text.split("\n")]
self.entries = {}
self.originals = []
for line in fp:
# save original text so we can write out a new suntans.dat with
# only minor changes
self.originals.append(line)
i = len(self.originals)-1
m = re.match(r"^\s*((\S+)\s+(\S+))?\s*.*",line)
if m and m.group(1):
key = m.group(2).lower()
val = m.group(3)
self.entries[key] = [val,i]
if filename:
fp.close()
def conf_float(self,key):
return self.conf_str(key,float)
def conf_int(self,key,default=None):
x=self.conf_str(key,int)
if x is None:
return default
return x
def conf_str(self,key,caster=lambda x:x):
key = key.lower()
if key in self.entries:
return caster(self.entries[key][0])
else:
return None
def __setitem__(self,key,value):
self.set_value(key,value)
def __getitem__(self,key):
return self.conf_str(key)
def __delitem__(self,key):
# if the line already exists, it will be written out commented, otherwise
# it won't be written at all.
self.set_value(key,None)
def __eq__(self,other):
return self.is_equal(other)
def is_equal(self,other,limit_to_keys=None):
# key by key equality comparison:
print("Comparing two configs")
for k in self.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in other.entries:
print("Other is missing key %s"%k)
return False
elif self.val_to_str(other.entries[k][0]) != self.val_to_str(self.entries[k][0]):
print("Different values key %s => %s, %s"%(k,self.entries[k][0],other.entries[k][0]))
return False
for k in other.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in self.entries:
print("other has extra key %s"%k)
return False
return True
def disable_value(self,key):
key = key.lower()
if key not in self.entries:
return
old_val,i = self.entries[key]
self.originals[i] = "# %s"%(self.originals[i])
self.entries[key][0] = None
def val_to_str(self,value):
# make sure that floats are formatted with plenty of digits:
# and handle annoyance of standard Python types vs. numpy types
# But None stays None, as it gets handled specially elsewhere
if value is None:
return None
if isinstance(value,float) or isinstance(value,floating):
value = "%.12g"%value
else:
value = str(value)
return value
def set_value(self,key,value):
""" Update a value in the configuration. Setting an item to None will
comment out the line if it already exists, and omit the line if it does
not yet exist.
"""
key = key.lower()
if key not in self.entries:
if value is None:
return
self.originals.append("# blank #")
i = len(self.originals) - 1
self.entries[key] = [None,i]
old_val,i = self.entries[key]
value = self.val_to_str(value)
if value is not None:
self.originals[i] = "%s %s # from sunreader code\n"%(key,value)
else:
self.originals[i] = "# " + self.originals[i]
self.entries[key][0] = value
def write_config(self,filename=None,check_changed=True,backup=True):
"""
Write this config out to a text file
filename: defaults to self.filename
check_changed: if True, and the file already exists and is not materially different,
then do nothing. Good for avoiding unnecessary changes to mtimes.
backup: if true, copy any existing file to <filename>.bak
"""
filename = filename or self.filename
if filename is None:
raise Exception("No clue about the filename for writing config file")
if check_changed:
if os.path.exists(filename):
existing_conf = self.__class__(filename)
if existing_conf == self:
print("No change in config")
return
if os.path.exists(filename) and backup:
filename_bak = filename + ".bak"
os.rename(filename,filename_bak)
fp = open(filename,'wt')
for line in self.originals:
fp.write(line)
fp.close()
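
# Illustrative sketch (assumption): round-tripping a suntans.dat-style file with
# GenericConfig. Keys are case-insensitive; setting a key to None comments the
# line out, and write_config() leaves the file untouched when nothing material
# changed. The path below is hypothetical.
def _example_edit_config(path='suntans.dat'):
    conf = GenericConfig(path)
    print("dt =", conf.conf_float('dt'))
    conf['ntout'] = 10            # same as conf.set_value('ntout', 10)
    del conf['obsolete_setting']  # harmless if the key is absent
    conf.write_config(check_changed=True, backup=True)
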
class SediLsConfig(GenericConfig):
def __init__(self,filename=None,text=None):
if filename is None and text is None:
text = """
########################################################################
#
# Default input file for the L. Sanford based sediment module in SUNTANS.
# see LS2008 for variable meanings
########################################################################
r_consolidate 3.4722e-5 # 3.0 / 86400. # [1/s]
r_swell 3.4722e-7 # 3.0 / 86400 / 100; # [1/s]
beta 1.36e-4 # [m/s/Pa]
gamma0 0.002 # [-]
rho_solid 2650 # dry weight [kg/m3]
tau_cs 0.125 # [Pa] critical stress of erosion for sand
tau_c_min 0.02 # [Pa] - minimum floc critical stress for erosion
phi_mud_solid_min 0.03 # mud solids fraction [-]
Nspecies 2 # Number of sediment species
# Settling velocities - positive is rising
ws000 -0.0005
ws001 -0.01
flags000 SED_MUD
flags001 SED_SAND
bed_fraction000 0.9
bed_fraction001 0.1
Nlayers 26
dm000 0.05 # nominal mass in each layer [kg/m2]
dm001 0.05
dm002 0.05
dm003 0.05
dm004 0.05
dm005 0.05
dm006 0.05
dm007 0.05
dm008 0.05
dm009 0.05
dm010 0.05
dm011 0.05
dm012 0.05
dm013 0.05
dm014 0.05
dm015 0.05
dm016 0.05
dm017 0.05
dm018 0.05
dm019 0.05
dm020 0.05
dm021 0.05
dm022 0.05
dm023 0.05
dm024 0.05
dm025 3.75
"""
super(SediLsConfig,self).__init__(filename=filename,text=text)
class SediConfig(GenericConfig):
def __init__(self,filename=None,text=None):
if filename is None and text is None:
text = """
########################################################################
#
# Default input file for the sediment module in SUNTANS.
#
########################################################################
NL 1 # Number of bed layers (MAX = 5)
spwght 2.65 # Specific weight of the sediment particle
diam 0.0001 # Mean diameter of the sediment particle (m)
gamma 0.2 # Coefficient for flocculated settling velocity
Chind 0.1 # Concentration (in volumetric fraction) criterion for hindered settling velocity
Cfloc 0.02 # Concentration (in volumetric fraction) criterion for flocculated settling velocity
k 0.0002 # Constant coefficient for settling velocity as a function of conc.
Kb 0.001 # Bottom length scale
Kagg 0.15 # Aggregation coefficient (dimensionless)
Kbrk 0.0002 # Break-up coefficient (dimensionless)
Fy 0.0000000001 #Yield strength (N)
nf 2.5 #Fractal dimension (0~3, usually 2~3)
q 0.5 #Constant coefficient in the breakup formulation
Nsize 2 #Number of the size classes
diam_min 0.00008 #Minimum sediment size
diam_max 0.00024 #Maximum sediment size
Dp 0.000004 #Diameter of the primary particle (in m)
Dl1 180000 # Dry density (g/m^3)
Tcsl1 0.06 # Critical Shear Stress (N/m^2)
E1 0.03 # Erosion Rate Constant (g/m^2/s)
alpha1 6.5 # Empirical coef. for the erosion rate
cnsd1 0.001 # Consolidation rate (g/m^2/s)
hl1 1.00 # Layer thickness (m)
"""
super(SediConfig,self).__init__(filename=filename,text=text)
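
# Illustrative sketch (assumption): both SediLsConfig and SediConfig start from
# built-in default text, so they can be written out as starting-point input
# files. The output filenames below are hypothetical; the names the model
# actually expects are set elsewhere.
def _example_write_default_sediment_configs(datadir='.'):
    SediLsConfig().write_config(os.path.join(datadir, 'sedi_ls.dat'))
    SediConfig().write_config(os.path.join(datadir, 'sedi.dat'))
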
# the current process for handling dates uses pylab datenums, but since these
# have units of days, and days have a non-power of 2 number of seconds in them,
# it can't represent times accurately. the error is usually on the order of
# 10 us - this parameter describes the expected resolution of values which
# have been converted to floating point - and where possible the conversion
# back from floating point should round at this precision
# to make it a bit easier to know that the arithmetic will be exact, this
# is specified as an integer inverse of the precision
datenum_precision_per_s = 100 # 10ms - should be evenly divisible into 1e6
def dt_round(dt):
""" Given a datetime or timedelta object, round it to datenum_precision
"""
if isinstance(dt,datetime.timedelta):
td = dt
# days are probably fine
dec_seconds = td.seconds + 1e-6 * td.microseconds
# the correct number of time quanta
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000 // datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
return datetime.timedelta( days=td.days,
seconds = new_seconds,
microseconds = new_microseconds )
else:
# same deal, but the fields have slightly different names
# And the integer arithmetic cannot be used to count absolute seconds -
# that will overflow 32-bit ints (okay with 64, but better not
# to assume 64-bit ints are available)
dec_seconds = dt.second + 1e-6 * dt.microsecond
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000// datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
# to handle the carries between microseconds, seconds, days,
# construct an exact timedelta object - also avoids having to do
# int arithmetic with seconds over many days, which could overflow.
td = datetime.timedelta(seconds = new_seconds - dt.second,
microseconds = new_microseconds - dt.microsecond)
return dt + td
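
# Illustrative sketch: dt_round() snaps timedelta and datetime values onto the
# 10 ms quantum implied by datenum_precision_per_s, which keeps the integer
# arithmetic in SunConfig.set_simulation_duration() exact. The values below are
# just examples.
def _example_dt_round():
    td = datetime.timedelta(seconds=90, microseconds=4999)
    print(dt_round(td))  # -> 0:01:30 (the stray 4999 us are rounded away)
    t = datetime.datetime(2000, 1, 1, 0, 0, 0, 123456)
    print(dt_round(t))   # -> 2000-01-01 00:00:00.120000
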
class SunConfig(GenericConfig):
def time_zero(self):
""" return python datetime for the when t=0 is"""
# try the old way, where these are separate fields:
start_year = self.conf_int('start_year')
start_day = self.conf_float('start_day')
if start_year is not None:
# Note: we're dealing with 0-based start days here.
start_datetime = datetime.datetime(start_year,1,1,tzinfo=utc) + dt_round(datetime.timedelta(start_day))
return start_datetime
# That failed, so try the other way
print("Trying the new way of specifying t0")
s = self.conf_str('TimeZero') # 1999-01-01-00:00
start_datetime = datetime.datetime.strptime(s,'%Y-%m-%d-%H:%M')
start_datetime = start_datetime.replace(tzinfo=utc)
return start_datetime
def simulation_seconds(self):
return self.conf_float('dt') * self.conf_int('nsteps')
def timestep(self):
""" Return a timedelta object for the timestep - should be safe from roundoff.
"""
return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )
def simulation_period(self):
""" This is more naive than the SunReader simulation_period(), in that
it does *not* look at any restart information, just start_year, start_day,
dt, and nsteps
WARNING: this used to add an extra dt to start_date - maybe trying to make it
the time of the first profile output?? this seems like a bad idea. As of
Nov 18, 2012, it does not do that (and at the same time, moves to datetime
arithmetic)
return a pair of python datetime objects for the start and end of the simulation.
"""
t0 = self.time_zero()
# why did it add dt here???
# start_date = t0 + datetime.timedelta( self.conf_float('dt') / (24.*3600) )
# simulation_days = self.simulation_seconds() / (24.*3600)
# end_date = start_date + datetime.timedelta(simulation_days)
start_date = t0
end_date = start_date + self.conf_int('nsteps')*self.timestep()
return start_date,end_date
def copy_t0(self,other):
self.set_value('start_year',other.conf_int('start_year'))
self.set_value('start_day',other.conf_float('start_day'))
def set_simulation_period(self,start_date,end_date):
""" Based on the two python datetime instances given, sets
start_day, start_year and nsteps
"""
self.set_value('start_year',start_date.year)
t0 = datetime.datetime( start_date.year,1,1,tzinfo=utc )
self.set_value('start_day',date2num(start_date) - date2num(t0))
# roundoff dangers here -
# self.set_simulation_duration_days( date2num(end_date) - date2num(start_date))
self.set_simulation_duration(delta=(end_date - start_date))
def set_simulation_duration_days(self,days):
self.set_simulation_duration(days=days)
def set_simulation_duration(self,
days=None,
delta=None,
seconds = None):
""" Set the number of steps for the simulation - exactly one of the parameters should
be specified:
days: decimal number of days - DANGER - it's very easy to get some round-off issues here
delta: a datetime.timedelta object.
hopefully safe, as long as any differencing between dates was done with UTC dates
(or local dates with no daylight savings transitions)
seconds: total number of seconds - this should be safe, though there are some possibilities for
roundoff.
"""
print("Setting simulation duration:")
print(" days=",days)
print(" delta=",delta)
print(" seconds=",seconds)
# convert everything to a timedelta -
if (days is not None) + (delta is not None) + (seconds is not None) != 1:
raise Exception("Exactly one of days, delta, or seconds must be specified")
if days is not None:
delta = datetime.timedelta(days=days)
elif seconds is not None:
delta = datetime.timedelta(seconds=seconds)
# assuming that dt is also a multiple of the precision (currently 10ms), this is
# safe
delta = dt_round(delta)
print(" rounded delta = ",delta)
timestep = dt_round(datetime.timedelta(seconds=self.conf_float('dt')))
print(" rounded timestep =",timestep)
# now we have a hopefully exact simulation duration in integer days, seconds, microseconds
# and a similarly exact timestep
# would like to do this:
# nsteps = delta / timestep
# but that's not supported until python 3.3 or so
def to_quanta(td):
""" return integer number of time quanta in the time delta object
"""
us_per_quanta = 1000000 // datenum_precision_per_s
return (td.days*86400 + td.seconds)*datenum_precision_per_s + \
int( round( td.microseconds/us_per_quanta) )
quanta_timestep = to_quanta(timestep)
quanta_delta = to_quanta(delta)
print(" quanta_timestep=",quanta_timestep)
print(" quanta_delta=",quanta_delta)
nsteps = quanta_delta // quanta_timestep
print(" nsteps = ",nsteps)
# double-check, going back to timedelta objects:
err = nsteps * timestep - delta
self.set_value('nsteps',int(nsteps))
print("Simulation duration requires %i steps (rounding error=%s)"%(self.conf_int('nsteps'),err))
def is_grid_compatible(self,other):
""" Compare two config's, and return False if any parameters which would
affect grid partitioning/celldata/edgedata/etc. are different.
Note that differences in other input files can also cause two grids to be different,
esp. vertspace.dat
"""
# keep all lowercase
keys = ['nkmax',
'stairstep',
'rstretch',
'correctvoronoi',
'voronoiratio',
'vertgridcorrect',
'intdepth',
'pslg',
'points',
'edges',
'cells',
'depth',
# 'vertspace.dat.in' if rstretch==0
'topology.dat',
'edgedata',
'celldata',
'vertspace.dat']
return self.is_equal(other,limit_to_keys=keys)
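
# Illustrative sketch (assumption): configuring a simulation window on an
# existing suntans.dat via SunConfig. set_simulation_period() fills in
# start_year/start_day and computes nsteps from the (rounded) dt, so dt must
# already be present in the file. `utc` is the tzinfo object used elsewhere in
# this module; the path is hypothetical.
def _example_set_period(path='suntans.dat'):
    conf = SunConfig(path)
    start = datetime.datetime(2005, 3, 1, tzinfo=utc)
    end = datetime.datetime(2005, 3, 11, tzinfo=utc)
    conf.set_simulation_period(start, end)
    print("nsteps =", conf.conf_int('nsteps'))
    conf.write_config()
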
class SunReader(object):
"""
Encapsulates reading of suntans output data
"""
sun_exec = local_config.sun_exec
EMPTY = 999999
def __init__(self,datadir='.'):
self.datadir = datadir
self.load_suntans_dat()
# lazy-load
self._step_data = None
self._profile_points = None
self._starting_time_step = None
self._profile_data = {}
self._z_levels = None
# per processor stuff
self._grids = {}
self._topos = {}
self._bathy = {}
self._edgedata = {}
self._celldata = {}
self._topo_edges = None
self._cell_velocity = {}
_shared_grid = None
def share_grid(self,other_sun):
""" Queries for grid information will be redirected to the given
instance - useful for a series of restarts where there's no need to
read the grid in multiple times.
No checking is done to make sure that the given instance actually has the
same grid as this instance - caveat emptor.
"""
self._shared_grid = other_sun
def load_suntans_dat(self):
self.conf = SunConfig( os.path.join(self.datadir,'suntans.dat') )
def save_to_folder(self,folder):
""" Write everything we know about the run into the given folder.
At a minimum this is suntans.dat, points.dat, cells.dat, edges.dat
"""
folder = os.path.abspath(folder)
orig_folder = os.path.abspath(self.datadir)
# suntans.dat - copied from our existing suntans.dat unless they are
# the same file.
if folder != orig_folder:
print("Copying suntans.dat")
shutil.copyfile( os.path.join(orig_folder,'suntans.dat'),
os.path.join(folder,'suntans.dat') )
else:
print("Skipping suntans.dat - it's the same file")
# grid:
g = self.grid()
print("Renumbering before save")
g.renumber()
print("Writing renumbered grid")
g.write_suntans( folder )
# old interface - just delegate to newer class
def conf_float(self,key):
return self.conf.conf_float(key)
def conf_int(self,key):
return self.conf.conf_int(key)
def conf_str(self,key):
return self.conf.conf_str(key)
def modify_dt(self,dt):
""" Changes dt, and tries to alter the simulation period
accordingly. This means changing the number of steps to
cover the same period of time, and updating start_year/day
so that boundaries.c gets the same real-world time for the
start of the simulation
"""
# we can't really trust start_year/day at the beginning,
# but try to preserve the duration:
duration = self.conf.simulation_seconds()
eff_t0 = self.effective_time_zero()
self.conf.set_value('dt',float(dt))
# Now this will use the new value of dt to figure out nsteps
self.conf.set_simulation_period(eff_t0,eff_t0 + duration/(24*3600.))
self.conf.write_config()
def grid(self,processor=None,readonly=True):
""" if processor is None, return a TriGrid object for the entire domain
otherwise, return one processor's grid.
if readonly is True, enable space-savings which reuse
things like the points array
"""
if self._shared_grid:
return self._shared_grid.grid(processor)
if processor not in self._grids:
self._grids[processor] = orthomaker.OrthoMaker(suntans_reader=self,processor=processor,readonly=readonly)
return self._grids[processor]
def proc_nonghost_cells(self,proc):
""" returns an array of cell indices which are *not* ghost cells """
ng_cells = []
g = self.grid(proc)
for i in range(g.Ncells()):
if not self.proc_cell_is_ghost(proc,i):
ng_cells.append(i)
return array(ng_cells)
def proc_cell_is_ghost(self,proc,i):
""" Returns true if the specified cell is a ghost cell.
"""
cdata = self.celldata(proc)
edges = self.grid(proc).edges
marks = edges[cdata[i,5:8].astype(int32),2]
return any(marks==6)
warned_profile_filesize = 0
def steps_available(self,output='grid'):
""" How many timesteps have been written out
(not how many have been run, though, since generally only a small
fraction are actually written out to disk)
"""
if output=='grid':
return self.step_data()['steps_output']
else:
# profile data:
from_step_dat = old_div((self.step_data()['step'] - self.starting_time_step()),self.conf_int('ntoutProfs'))
prof_dat_size = os.stat( self.file_path('FreeSurfaceFile') + ".prof" ).st_size
n_prof_pnts = len( self.profile_points() )
n_steps_in_file = old_div(prof_dat_size, (n_prof_pnts * REALSIZE))
if n_steps_in_file != from_step_dat and not self.warned_profile_filesize:
print("Filesize suggests %i profile timesteps, but step.dat suggests %i"%(n_steps_in_file,from_step_dat))
self.warned_profile_filesize = 1
return n_steps_in_file
def step_data(self,reload=False):
""" Return a dict with information parsed from step.dat, or None if there is no step.dat file
"""
if self._step_data is None or reload:
try:
raw = open(self.datadir + "/step.dat","rt").read()
except IOError:
return None
m = re.match("On (\d+) of (\d+), t=([0-9\.]+) \(([0-9\.]+)% Complete, (\d+) output\)",
raw)
if m is None:
print("------step.dat------")
print(raw)
print("--------------------")
raise Exception("Failed to parse step.dat. Probably transient - try again")
self._step_data = {'step':int(m.group(1)),
'total_steps':int(m.group(2)),
'model_time':float(m.group(3)),
'percent':float(m.group(4)),
'steps_output':int(m.group(5))
}
return self._step_data
def domain_decomposition(self,np=None):
""" Once the grid and depths are in place, call on sun to
break it out into multiple domains
"""
if np is None:
np = self.num_processors()
# possible that this is the first time to run the decomposition
if np is None:
print("Couldn't detect number of processors - will use np=1")
np = 1
cmd = [self.sun_exec,'-g','-vvv','--datadir=%s'%os.path.abspath(self.datadir)]
MPIrunner(cmd,np=np)
# and check to see that it worked - or at least created a topology.dat.0 file
if not os.path.exists( self.file_path('topology',0) ):
raise Exception("Appears that partitioning failed - probably couldn't find sun (%s) or MPI"%self.sun_exec)
def run_simulation(self,np=None,wait=1):
""" run it!
if wait is true, will not return until simulation process completes
"""
if np is None:
np = self.num_processors()
if np is None:
print("Couldn't detect number of processors - will use np=1")
np = 1
cmd = [self.sun_exec,'-s','-vvv','--datadir=%s'%os.path.abspath(self.datadir)]
if self.starting_time_step() != 0:
print("Will be a restart")
cmd.append('-r')
if wait:
t_start = time.time()
MPIrunner(cmd,np=np,wait=wait)
if wait:
t_elapsed = time.time() - t_start
self.log_runtime(np,t_elapsed)
def log_runtime(self,np,t_elapsed):
""" For now, just log as many of the relevant details as possible to a file in datadir.
Depending on how teragrid charges for stuff, this may get expanded into something that
can approximate the anticipated run time.
possible factors:
- the sun executable
- all of sedi.dat, wave.dat
- the grid (discerned from depth.dat)
- Nkmax, dt, nonlinear, beta==0, gamma==0
- np
"""
details = {}
for datfile in ["sedi.dat","wave.dat"]:
fn = os.path.join(self.datadir,datfile)
if os.path.exists(fn):
fp = open(fn,"rt")
hsh = hashlib.md5() # md5.new()
hsh.update(fp.read())
fp.close()
details[datfile ] = hsh.hexdigest()
else:
details[datfile] = "missing"
fp = open(self.sun_exec,"rb")
hsh = hashlib.md5() ; hsh.update(fp.read())
details['sun'] = hsh.hexdigest() # md5.new(fp.read()).hexdigest()
fp.close()
# get some idea of grid size just from the first processor:
cdata = self.celldata(0)
details['Nc0'] = "%d"%len(cdata)
details['Nc3D0'] = "%d"%cdata[:,4].sum() # column 4 is Nk per cell
details['np'] = "%d"%np
details['dt'] = "%f"%self.conf_float('dt')
details['nonlinear'] = "%d"%self.conf_int('nonlinear')
for s in ['beta','gamma']:
if self.conf_float(s)!=0:
details[s] = "yes"
else:
details[s] = "no"
# And write it out...
log_fp = open(os.path.join(self.datadir,"timing.out"),"wt")
for k in details:
log_fp.write("%s = %s\n"%(k,details[k]))
log_fp.write("elapsed = %f\n"%t_elapsed)
log_fp.close()
# The actual data:
# Most data is defined at the voronoi centers
# Some values (free surface), or defined only at the surface,
# while others are defined for every z-level
# Some values have a time axis as well.
# Want something like:
# fs = sdata.freesurface()
# # fs has a time axis, which can be referenced
# #
# gridplot( fs.grid(time=213.4) )
# gridplot( fs.grid(step=100) )
# Maybe the grid can handle most of the plotting, and we just
# pass it the grid data?
# Then the freesurface would just map cell indices to a scalar
# value.
# Surface velocity would map cell indices to a 3-vector
def cell_velocity_fread(self,processor,time_step):
""" Returns velocity values in an array Nc x Nkmax x 3
for now, only a single time_step can be specified
returns tuple of grid, velocity[cell,z_level,component]
"""
g = self.grid(processor)
Ncells = g.Ncells()
u_name = self.file_path("HorizontalVelocityFile",processor)
nsteps = self.steps_available()
nkmax = self.conf_int('Nkmax')
fp = open(u_name,"rb")
if time_step >= nsteps:
print("Using last time step instead of whatever you asked for")
time_step = nsteps - 1
if time_step < 0:
time_step = nsteps + time_step
if time_step < 0:
print("Clamping time step to be non-negative")
time_step = 0
frame_size = nkmax*3*Ncells*REALSIZE
fp.seek(time_step*frame_size)
raw = fp.read(frame_size)
# assume it was run on the same machine, so endian-ness
# is the same
values = fromstring(raw,REALTYPE)
results = values.reshape( (nkmax,3,Ncells) )
results = swapaxes(results,1,2) # => z-level, cell index, component
results = swapaxes(results,0,1) # => cell index, z-level, component
return g,results
def cell_velocity(self,processor,time_step=None):
""" Like cell_velocity(), but try to memory map the file, and returns all
timesteps
returns tuple of grid, velocity[time,cell,z_level,component]
"""
g = self.grid(processor)
if processor not in self._cell_velocity:
Ncells = g.Ncells()
u_name = self.file_path("HorizontalVelocityFile",processor)
nkmax = self.conf_int('Nkmax')
# Choose the number of steps based on the file size -
# A single step will take Ncells*Nkmax*3*REALSIZE
frame_size = nkmax*3*Ncells*REALSIZE
nbytes = os.stat(u_name).st_size
# self.steps_available()
nsteps = nbytes // frame_size
final_shape = (nsteps,nkmax,3,Ncells)
results = memmap(u_name, dtype=REALTYPE, mode='r', shape=final_shape)
# time, k, component, cell
results = swapaxes(results,2,3) # => time, z-level, cell index, component
results = swapaxes(results,1,2) # => time, cell index, z-level, component
self._cell_velocity[processor] = results
else:
results = self._cell_velocity[processor]
if time_step is not None:
results = results[time_step,...]
return g,results
def cell_nuT(self,processor,time_step):
return self.cell_scalar('EddyViscosityFile',processor,time_step)
def cell_salinity(self,processor,time_step=None):
""" Read salinity values into an array Nc x Nkmax
if time_step is None, all available timesteps are returned
returns tuple of grid, salinity[cell,z_level] (or salinity[time,cell,z_level])
"""
return self.cell_scalar('SalinityFile',processor,time_step)
def cell_temperature(self,processor,time_step=None):
""" Read temperature values into an array Nc x Nkmax
if time_step is None, all available timesteps are returned
returns tuple of grid, temperature[cell,z_level] (or temperature[time,cell,z_level])
"""
return self.cell_scalar('TemperatureFile',processor,time_step)
_cell_scalars = None
def cell_scalar(self,filename_name,processor,time_step=None):
""" Read a cell-based scalar value from suntans output.
filename_name: the suntans.dat field that has the setting we want.
SalinityFile
TemperatureFile
"""
if self._cell_scalars is None:
self._cell_scalars = {}
if filename_name not in self._cell_scalars:
self._cell_scalars[filename_name] = {}
g = self.grid(processor)
if processor not in self._cell_scalars[filename_name]:
Ncells = g.Ncells()
s_name = self.file_path(filename_name,processor)
nsteps = self.steps_available()
nkmax = self.conf_int('Nkmax')
frame_size = Ncells * nkmax * REALSIZE
nbytes = os.stat(s_name).st_size
data_shape = ( nbytes//frame_size,nkmax,Ncells)
try:
full_scal = memmap(s_name,
dtype=REALTYPE,
mode='r',
shape=data_shape)
# print "Successfully mapped %s"%s_name
except mmap.error:
print("Looks like we can't memory map the files. Going to be slow...")
print("Size of %s is %d bytes"%(s_name,bytes))
# is there another option, like a dynamically read, file-backed array?
fp = open(s_name,'rb')
full_scal = fromstring(fp.read(),dtype=REALTYPE ).reshape( data_shape )
fp.close()
self._cell_scalars[filename_name][processor] = swapaxes(full_scal,1,2)
full_scal = self._cell_scalars[filename_name][processor]
nsteps = full_scal.shape[0]
if time_step is not None:
if time_step >= nsteps:
time_step = nsteps - 1
if time_step < 0:
time_step = nsteps + time_step
if time_step < 0:
time_step = 0
return g,full_scal[time_step,:,:]
else:
return g,full_scal[:,:,:]
def close_files(self):
""" Close things like memmap'd scalar files
"""
if self._cell_scalars is not None:
for filename in self._cell_scalars:
for proc in list(self._cell_scalars[filename].keys()):
del self._cell_scalars[filename][proc]
if self._freesurface is not None:
for proc in list(self._freesurface.keys()):
del self._freesurface[proc]
if self._cell_velocity is not None:
for proc in list(self._cell_velocity.keys()):
del self._cell_velocity[proc]
if self._celldata is not None:
for proc in list(self._celldata.keys()):
del self._celldata[proc]
if self._edgedata is not None:
for proc in list(self._edgedata.keys()):
del self._edgedata[proc]
def ctop(self,processor,time_step):
h = self.freesurface(processor,[time_step])[0]
return self.h_to_ctop(h)
def h_to_ctop(self,h,dzmin=None):
if dzmin is None:
# recent suntans code does this
# the 2 is from #define DZMIN_SURFACE 2*DZMIN
# so self.dzmin should reflect #DEFINE DZMIN 0.001 (or whatever it currently is)
dzmin = 2*self.dzmin
ctops = searchsorted(self.z_levels() - dzmin, -h)
return ctops
_freesurface = None
def freesurface(self,processor,time_step=None):
""" Returns freesurface values in an array (len(time_step),Nc)
(or a 1-d array if time_step is a scalar).
if time_step is not specified, returns freesurface for all cells, all timesteps
in the file.
"""
g = self.grid(processor)
if self._freesurface is None:
self._freesurface = {}
if processor not in self._freesurface:
Ncells = g.Ncells()
fs_name = self.file_path("FreeSurfaceFile",processor)
frame_size = Ncells * REALSIZE
nbytes = os.stat(fs_name).st_size
data_shape = ( nbytes//frame_size,Ncells)
try:
self._freesurface[processor] = memmap(fs_name, dtype=REALTYPE, mode='r', shape=data_shape)
except mmap.error:
fp = open(fs_name,'rb')
self._freesurface[processor] = fromstring(fp.read(),dtype=REALTYPE ).reshape( data_shape )
fp.close()
if time_step is None:
return self._freesurface[processor]
else:
return self._freesurface[processor][time_step,:]
def file_path(self,conf_name,processor=None):
base_name = self.conf_str(conf_name)
if base_name is None:
# raise Exception,"File path configuration not found for %s"%conf_name
base_name = conf_name
if processor is not None and conf_name not in ('points','DataLocations'):
base_name += ".%i"%processor
return self.datadir+"/"+base_name
def profile_points(self,force=False):
if self._profile_points is None or force:
prof_points_fp = open(self.file_path("DataLocations"))
xy_points = []
for line in prof_points_fp:
this_point = [float(s) for s in line.split()]
if len(this_point) == 2:
xy_points.append(this_point)
self._profile_points = array(xy_points)
return self._profile_points
_profdata = None
def profdata(self):
""" Reads the profdata.dat file,
"""
if self._profdata is None:
fn = self.file_path('ProfileDataFile')
fp = open(fn,'rb')
# data format:
# (4 byte int)numTotalDataPoints: Number of data points found on all processors. Note that
# this could be different from the number specified since some may lie outside the domain.
# (4 byte int)numInterpPoints: Number of nearest neighbors to each point used for interpolation.
# (4 byte int)NkmaxProfs: Number of vertical levels output in the profiles.
# (4 byte int)nsteps: Total number of time steps in the simulation.
# (4 byte int)ntoutProfs: Frequency of profile output. This implies a total of nsteps/ntoutProfs are output.
# (8 byte double)dt: Time step size
# (8 byte double array X NkmaxProfs)dz: Contains the vertical grid spacings.
# (4 byte int array X numTotalDataPoints)allIndices: Contains the indices of each point that determines its
# original location in the data file. This is mostly for debugging since the output data is resorted
# so that it is in the same order as it appeared in the data file.
# (4 byte int array X 2*numTotalDataPoints)dataXY: Contains the original data points at (or near) which profiles
# are output.
# (8 byte double array X numTotalDataPoints*numInterpPoints)xv: Array containing the x-locations of the nearest
# neighbors to the dataXY points. If numInterpPoints=3, then the 3 closest neighbors to the point
# (dataXY[2*i],dataXY[2*i+1]) are (xv[3*i],yv[3*i]), (xv[3*i+1],yv[3*i+1]), (xv[3*i+2],yv[3*i+2]).
# (8 byte double array X numTotalDataPoints*numInterpPoints)yv: Array containing the y-locations of the nearest
# neighbors to the dataXY points (see xv above).
pdata = {}
hdr_ints = fromstring(fp.read(5*4),int32)
pdata['numTotalDataPoints'] = hdr_ints[0]
pdata['numInterpPoints'] = hdr_ints[1]
pdata['NkmaxProfs'] = hdr_ints[2]
pdata['nsteps'] = hdr_ints[3]
pdata['ntoutProfs'] = hdr_ints[4]
pdata['dt'] = fromstring(fp.read(REALSIZE),REALTYPE)
pdata['dzz'] = fromstring(fp.read(REALSIZE*pdata['NkmaxProfs']),REALTYPE)
pdata['allIndices'] = fromstring(fp.read(4*pdata['numTotalDataPoints']),int32)
# Wait a second - this file doesn't even have proc/cell info...
dataxy = fromstring(fp.read(REALSIZE*2*pdata['numTotalDataPoints']),REALTYPE)
# pdata['dataXY_serial'] = dataxy # needs to be reshaped
pdata['dataXY'] = dataxy.reshape( (-1,2) )
print("About to read coordinates, file position is",fp.tell())
xvyv = fromstring(fp.read(2*REALSIZE*pdata['numTotalDataPoints']*pdata['numInterpPoints']),
REALTYPE)
pdata['xvyv'] = xvyv
pdata['xy'] = xvyv.reshape( (2,-1) ).transpose()
self._profdata = pdata
return self._profdata
def nkmax_profs(self):
nkmax_profs = self.conf_int('NkmaxProfs')
if nkmax_profs == 0:
nkmax_profs = self.conf_int('Nkmax')
return nkmax_profs
def profile_data(self,scalar,timestep=None):
""" scalar is one of HorizontalVelocityFile,
FreeSurfaceFile, etc"""
if scalar not in self._profile_data:
prof_pnts = self.profile_points()
prof_len = prof_pnts.shape[0]
prof_fname = self.file_path(scalar) + ".prof"
if not os.path.exists(prof_fname):
return None
## Figure out the shape of the output:
# I'm assuming that profile data gets spat out in the same
# ordering of dimensions as regular grid-based data
shape_per_step = []
# profiles.c writes u first then v, then w, each with a
# separate call to Write3DData()
if scalar == 'HorizontalVelocityFile':
shape_per_step.append(3)
# the outer loop is over profile points
shape_per_step.append(prof_len)
# This used to drop the z-level dimension for 2-D runs, but
# moving forward, seems better to always include z even if
# there's only one layer, so post-processing scripts don't have
# to special case it.
## And does it have z-levels? if so, that is the inner-most
# loop, so the last dimension of the array
if scalar != 'FreeSurfaceFile':
nkmax_profs = self.nkmax_profs()
shape_per_step.append(nkmax_profs)
# better to use the size of the specific file we're opening:
# NOT this way:
# profile_steps = self.steps_available(output='profile')
# but this way:
prof_dat_size = os.stat( prof_fname).st_size
bytes_per_step = REALSIZE * prod( array(shape_per_step) )
n_steps_in_file = int( prof_dat_size//bytes_per_step )
final_shape = tuple([n_steps_in_file] + shape_per_step)
# print "Total shape of profile data: ",final_shape
if self.conf_int('numInterpPoints') != 1:
raise Exception("Sorry - please set numInterpPoints to 1")
if 1:
# print "Trying to memory map the data.."
data = memmap(prof_fname, dtype=REALTYPE, mode='r', shape=final_shape)
else:
prof_fp = open(prof_fname,"rb")
data = fromstring(prof_fp.read(),float64)
prof_fp.close()
data = data.reshape(*final_shape)
self._profile_data[scalar] = data
data = self._profile_data[scalar]
if timestep is not None:
if timestep >= data.shape[0]:
print("Bad timestep %d, last valid step is %d"%(timestep,data.shape[0]-1))
timestep = data.shape[0] - 1
return data[timestep]
else:
return data
def scalars(self):
""" Returns a list of names for scalar outputs, i.e.
[SalinityFile, TemperatureFile]
"""
scals = []
if float(self.conf['beta']) != 0:
scals.append('SalinityFile')
if float(self.conf['gamma']) != 0:
scals.append('TemperatureFile')
scals.append('EddyViscosityFile')
return scals
# Profile processing:
def profile_to_transect(self,xy,absdays_utc,scalar):
""" Extract data from profile dumps close to the given times and
locations, then construct a Transect instance.
xy is an Nx2 vector of utm coordinates
absdays_utc is an N vector of UTC abs-days, from date2num
scalar identifies what variable is output - 'SalinityFile'
For now, no interpolation is done, and the transect will have the
actual xy, times and depths from the simulation.
In the future there could be options for interpolating in time,
horizontal space, and/or vertical space, such that the resulting
transect would be congruent with some other transect
"""
## Allocate
N = len(xy)
new_xy = zeros( (N,2), float64 )
new_times = zeros( N, float64 )
if self.nkmax_profs() != self.conf_int('Nkmax'):
# This isn't hard, just too lazy to do it right now.
raise Exception("Not quite smart enough to handle profiles that are different Nkmax than grid")
new_scalar = zeros( (self.conf_int('Nkmax'),N), float64 )
z_levels = concatenate( ([0],-self.z_levels()) )
z_interfaces = repeat( z_levels[:,newaxis],N,axis=1 )
mask = zeros( new_scalar.shape,bool )
## Get timeline:
t0 = self.conf.time_zero()
t0_absdays = date2num( t0 )
prof_absdays = t0_absdays + self.timeline(units='days',output='profile')
pnts = self.profile_points()
# this memory maps the profile data file, so there is no advantage in pulling
# just a single timestep.
prof_data = self.profile_data(scalar)
prof_h = self.profile_data('FreeSurfaceFile')
if len(prof_data.shape) == 2:
new_shape = (prof_data.shape[0],prof_data.shape[1],1)
prof_data = prof_data.reshape( new_shape )
# prof_data: [step,point,z-level]
for i in range(N):
# what timestep is this closest to?
prof_step = searchsorted(prof_absdays,absdays_utc[i])
# what profile point is it closest to?
prof_loc = self.xy_to_profile_index(xy[i])
# and find the bathymetry there:
proc,cell = self.closest_cell(xy[i])
cdata = self.celldata(proc)
bathy = -cdata[cell,3]
# read that profile
fs_height = prof_h[prof_step,prof_loc]
new_xy[i] = pnts[prof_loc]
new_times[i] = prof_absdays[prof_step]
new_scalar[:,i] = prof_data[prof_step,prof_loc,:]
# and take care of a possible thin cell at the surface:
Nk = cdata[cell,4].astype(int32)
ctop = min( self.h_to_ctop(fs_height), Nk-1)
z_interfaces[:ctop+1,i] = fs_height
z_interfaces[Nk:,i] = bathy
mask[:ctop,i] = True
mask[Nk:,i] = True
# apply mask to scalar
new_scalar = ma.array(new_scalar,mask=mask)
if 0:
# Transect assumes that data are located at the given nodes, so we need to
# roughly translate the bin-based values of SUNTANS into centered points.
# This does not take into account the location of the freesurface or the bed
# within the given z-level.
z_level_centers = self.bathymetry_offset()-self.z_levels() + 0.5*self.dz()
return transect.Transect(new_xy,new_times,
z_level_centers,
new_scalar,
desc='Suntans output: %s'%scalar)
else:
# New transect code allows for zonal scalar -
z_interfaces = z_interfaces + self.bathymetry_offset()
return transect.Transect(new_xy,new_times,
z_interfaces,
new_scalar,
desc='Suntans output: %s'%scalar)
def map_local_to_global(self,proc):
gglobal=self.grid()
glocal=self.grid(proc)
l2g=np.zeros( glocal.Ncells(), 'i4')
for li in range(glocal.Ncells()):
l2g[li] = gglobal.find_cell( glocal.cells[li] )
return l2g
# in-core caching in addition to filesystem caching
_global_to_local = None
def map_global_cells_to_local_cells(self,cells=None,allow_cache=True,check_chain=True,
honor_ghosts=False):
""" Map global cell indices to local cell indices.
if cells is None, return a mapping for all global cells
if cells is None, and allow_cache is true, attempt to read/write
a cached mapping as global_to_local.bin
if honor_ghosts is True, then make the mapping consistent with the "owner"
of each cell, rather than just a processor which contains that cell.
"""
# Map global cells to (processor, local cell) pairs by iterating over the
# processors and matching cells against the global grid.
if cells is None and allow_cache:
if self._global_to_local is not None:
print("using in-core caching for global to local mapping")
return self._global_to_local
if check_chain:
datadirs = [s.datadir for s in self.chain_restarts()]
else:
datadirs = [self.datadir]
for datadir in datadirs[::-1]:
cache_fn = os.path.join(datadir,'global_to_local.bin')
if os.path.exists(cache_fn):
fp = open(cache_fn,'rb')
global_to_local = None
try:
global_to_local = pickle.load(fp)
finally:
fp.close()
if global_to_local is not None:
return global_to_local
cache_fn = os.path.join(self.datadir,'global_to_local.bin')
else:
cache_fn = None
grid = self.grid()
if cells is None:
print("Will map all cells")
cells = arange(grid.Ncells())
all_cells = True
else:
all_cells = False
global_to_local = zeros( len(cells), [('global',int32),
('proc',int32),
('local',int32)])
global_to_local['global'] = cells
global_to_local['proc'] = -1
for processor in range(self.num_processors()):
print("P%d"%processor, end=' ')
local_g = self.grid(processor)
if all_cells:
# faster to loop over the local cells
if honor_ghosts:
local_cells=self.proc_nonghost_cells(processor)
else:
local_cells=range(local_g.Ncells())
for i in local_cells:
gi = grid.find_cell( local_g.cells[i] )
gtl = global_to_local[gi]
if gtl['proc'] >= 0:
continue
gtl['proc'] = processor
gtl['local'] = i
else:
for gtl in global_to_local:
if gtl['proc'] >= 0:
continue
try:
i = local_g.find_cell( grid.cells[gtl['global']] )
gtl['proc'] = processor
gtl['local'] = i
except trigrid.NoSuchCellError:
pass
print("done mapping")
if cache_fn is not None:
fp = open(cache_fn,'wb')
pickle.dump(global_to_local,
fp)
fp.close()
self._global_to_local = global_to_local
return global_to_local
def cell_values_local_to_global(self,cell_values=None,func=None):
""" Given per-processor cell values (for the moment, only supports
2-D cell-centered scalars) return an array for the global cell-centered
data
"""
g2l = self.map_global_cells_to_local_cells()
gg = self.grid()
print("Compiling local data to global array")
g_data = None # allocate lazily so we know the dtype to use
for p in range(self.num_processors()):
if cell_values:
local_values = cell_values[p]
else:
local_values = func(p)
# not terribly efficient, but maybe okay...
local_g2l = g2l[ g2l['proc'] == p ]
if g_data is None:
g_data = zeros( gg.Ncells(), dtype=local_values.dtype)
g_data[ local_g2l['global'] ] = local_values[ local_g2l['local'] ]
print("Done compiling local data to global array")
return g_data
def read_section_defs(self):
fp = open(self.file_path('sectionsinputfile'),'rt')
def tok_gen(fp):
for line in fp:
for snip in line.split():
yield snip
token = tok_gen(fp).__next__
Nsections = int(token())
sections = [None]*Nsections
for nsec in range(Nsections):
Nnodes = int(token())
nodes = []
for n in range(Nnodes):
nodes.append( int(token()) )
sections[nsec] = nodes
return sections
def full_to_transect(self,xy,absdays,scalar_file,min_dx=10.0):
""" Construct a Transect from full grid scalar output, where xy is a sequence of points
giving the transect, absdays a sequence of times, and scalar_file names which
field should be read.
For now, it's not smart enough to know how big the cells are and do a proper line-cell
intersection, so you have to specify a min_dx, and as long as that is significantly
smaller than the grid size, it will pick up all the cells with a significant intersection
with the transect.
Also, the handling of the freesurface and timesteps is limited: the freesurface is used
only to decide ctop - it is not used to truncate the surface cell.
And no interpolation in time is done - only the nearest timestep is extracted.
"""
xy = asarray(xy)
absdays = asarray(absdays)
utm_points,sources = upsample_linearring(xy,density=200.0,closed_ring=0,return_sources=1)
# construct an interpolated set of times, estimating a timestamp for each of the newly
# interpolated utm_points.
absdays_expanded = interp(sources, arange(len(absdays)),absdays)
utm_deltas = sqrt(sum(diff(utm_points,axis=0)**2,axis=1))
utm_dists = concatenate( ([0],cumsum(utm_deltas)) )
## choose a timestep:
timeline = date2num(self.time_zero()) + self.timeline(output='grid',units='days')
steps = searchsorted(timeline,absdays_expanded) # the output right after the requested date
# adjust to whichever step is closer:
for i in range(len(steps)):
if steps[i]>0 and timeline[steps[i]] - absdays_expanded[i] > absdays_expanded[i] - timeline[steps[i]-1]:
steps[i] -= 1
g = self.grid()
global_cells = []
for xy in utm_points:
global_cells.append( g.closest_cell(xy) )
# Now remove any duplicates
global_cells = array(global_cells)
valid = (global_cells[:-1] != global_cells[1:] )
valid = concatenate( (valid,[True]) )
global_cells = global_cells[valid]
utm_dists = utm_dists[valid]
utm_points = utm_points[valid]
steps = steps[valid]
absdays_expanded = absdays_expanded[valid]
nkmax = self.conf_int('nkmax')
scalar = zeros( (len(global_cells),nkmax), float64 )
local_proc_cells = zeros( (len(global_cells),2), int32 ) - 1
bathy_offset = self.bathymetry_offset()
interface_elevs = bathy_offset + concatenate( ([0], -self.z_levels()) ) # Nk + 1 entries!
elev_fs = zeros( len(global_cells), float64 ) # these will be corrected for bathy_offset
elev_bed = zeros( len(global_cells), float64 )
# elevations of interfaces for each watercolumn
elev_per_column = zeros( (len(global_cells),len(interface_elevs)), float64)
for proc in range(self.num_processors()):
# print "Reading salinity from processor %d"%proc
local_g, full_scal = self.cell_scalar(scalar_file,proc) # [Ntimesteps,cells,z-level]
fs = self.freesurface(proc) # [Ntimesteps,cells]
celldata = self.celldata(proc)
for gc_i in range(len(global_cells)):
gc = global_cells[gc_i]
step = steps[gc_i]
if local_proc_cells[gc_i,0] >= 0:
continue # already been read
try:
i = local_g.find_cell( g.cells[gc] )
local_proc_cells[gc_i,0] = proc
local_proc_cells[gc_i,1] = i
ktop = self.h_to_ctop(fs[step,i])
kmax = int(celldata[i,4])
elev_fs[gc_i] = fs[step,i] + bathy_offset
elev_bed[gc_i] = -celldata[i,3] + bathy_offset
scalar[gc_i,:] = full_scal[step,i,:]
scalar[gc_i,:ktop] = nan
scalar[gc_i,kmax:] = nan
elev_per_column[gc_i,:] = interface_elevs
elev_per_column[gc_i,ktop] = elev_fs[gc_i]
elev_per_column[gc_i,kmax] = elev_bed[gc_i]
except trigrid.NoSuchCellError:
continue
## Make that into a transect:
scalar = ma.masked_invalid(scalar)
# ideally we'd include the time-varying freesurface elevation, too...
t = transect.Transect(xy=utm_points,
times = timeline[steps],
elevations=elev_per_column.T,
scalar=scalar.T,
dists=utm_dists)
t.trim_to_valid()
return t
_finder = None
def xy_to_profile_index(self,xy):
if self._finder is None:
pnts = self.profile_points()
data = arange(len(pnts))
finder = field.XYZField(pnts,data)
finder.build_index()
self._finder = finder
return self._finder.nearest(xy)
def timeline(self,units='seconds',output='grid'):
"""
units: seconds, minutes, hours, days
or absdays, which returns matplotlib-style datenums
output: grid - timeseries for grid outputs
profile - timeseries for profile outputs
times are measured from sun.time_zero() except for absdays
which is the matplotlib absolute time unit, decimal days
since matplotlib's date epoch (see matplotlib.dates.date2num)
Note that in some cases the outputs are not evenly spaced,
particularly when ntout does not divide evenly into nsteps
and when the starting time step was not on an integral number
of ntouts
"""
steps_output = self.steps_available(output)
if output=='grid':
output_interval = self.conf_int('ntout')
elif output=='profile':
output_interval = self.conf_int('ntoutProfs')
else:
raise Exception("bad output spec for timeline: %s"%output)
dt = output_interval*self.conf_float('dt')
if output == 'grid':
# a little tricky, as the starting step may not be an integral number of
# ntout.
offset = self.starting_time_step() % self.conf_int('ntout')
steps = arange(0,steps_output)*output_interval + self.starting_time_step() - offset
steps[0] += offset
# may also have a straggler
last_step = StoreFile(self,0).timestep()
if last_step != steps[-1]:
steps = concatenate( (steps,[last_step]) )
tseries_seconds = steps*self.conf_float('dt')
else:
# profiles are output when the step is a multiple of the output interval, and only starting
# with the end of the first step:
# this expression rounds starting_time_step+1 up to the next even multiple of output_interval.
first_prof_output_step = output_interval * \
((self.starting_time_step() + output_interval)//output_interval)
tseries_seconds = arange(0,steps_output)*dt + first_prof_output_step*self.conf_float('dt')
offset = 0
if units == 'seconds':
divisor = 1.
elif units == 'minutes':
divisor = 60.
elif units == 'hours':
divisor = 3600.
elif units == 'days':
divisor = 24*3600
elif units =='absdays':
divisor = 24*3600
offset = date2num( self.time_zero() )
else:
raise Exception("Bad time unit: %s"%units)
return offset + tseries_seconds/divisor
class Topology(object):
pass
def topology(self,processor):
if processor not in self._topos:
topo_path = self.file_path('topology',processor)
try:
fp = open(topo_path)
except IOError:
return None
def int_iter(fp):
for line in fp:
for num in map(int,line.split()):
yield num
# just returns integer after integer...
nums = int_iter(fp)
topo = SunReader.Topology()
topo.filename = topo_path
topo.num_processors = next(nums)
topo.num_neighbors = next(nums)
topo.neighbor_ids = [next(nums) for i in range(topo.num_neighbors)]
topo.cellsend = [None]*topo.num_neighbors
topo.cellrecv = [None]*topo.num_neighbors
topo.edgesend = [None]*topo.num_neighbors
topo.edgerecv = [None]*topo.num_neighbors
for i in range(topo.num_neighbors):
num_cellsend = next(nums)
num_cellrecv = next(nums)
num_edgesend = next(nums)
num_edgerecv = next(nums)
topo.cellsend[i] = array([next(nums) for j in range(num_cellsend)])
topo.cellrecv[i] = array([next(nums) for j in range(num_cellrecv)])
topo.edgesend[i] = array([next(nums) for j in range(num_edgesend)])
topo.edgerecv[i] = array([next(nums) for j in range(num_edgerecv)])
# 3 and 6 come from the limits in grid.h, MAXBCTYPES-1 and MAXMARKS-1
topo.celldist = array([next(nums) for i in range(3)])
topo.edgedist = array([next(nums) for i in range(6)])
grid = self.grid(processor)
topo.cellp = array([next(nums) for i in range(grid.Ncells())])
topo.edgep = array([next(nums) for i in range(grid.Nedges())])
self._topos[processor] = topo
return self._topos[processor]
def sendrecv_edges(self,data):
""" data: list with num_processors() elements, each being iterable with Nedges()
elements.
Exchange data as described by the topology files, overwriting entries in the given arrays.
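Example (hypothetical sketch; 'marked' is an illustrative per-edge array):
marked = [zeros(sun.grid(p).Nedges()) for p in range(sun.num_processors())]
# ... fill in locally-owned edges on each processor ...
sun.sendrecv_edges(marked) # ghost edges now hold the values sent by their neighbors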
"""
for proc in range(self.num_processors()):
print("sendrecv for proc %d"%proc)
topo = self.topology(proc)
for i in range(topo.num_neighbors):
nbr = topo.neighbor_ids[i]
print(" receiving from %d"%nbr)
topo_nbr = self.topology(nbr)
# find the edge ids that they're supposed to send to us:
me_to_them = topo_nbr.neighbor_ids.index(proc)
# These are the indices, as known by the neighbor:
nbr_edges_to_send = topo_nbr.edgesend[me_to_them]
# And the indices as known to me:
my_edges_to_recv = topo.edgerecv[i]
data[proc][my_edges_to_recv] = data[nbr][nbr_edges_to_send]
def num_processors(self):
t = self.topology(0)
if t:
return t.num_processors
else:
return None
def time_zero(self):
""" return python datetime for the when t=0. This has been moved
into SunConfig, and here we just delegate to that.
"""
return self.conf.time_zero()
def simulation_period(self,end_is_last_output=True):
""" return a pair of python datetime objects for the start and end of the simulation
This includes the offset due to a restart, and gives the ending datetime of when the
simulation would finish, irrespective of how many steps have been run so far.
there are some corner cases here that get fuzzy, depending on which files
are available for determining the simulation period. Read the code for details,
but in general, life is easier if ntout divides into nsteps evenly.
where possible, end_is_last_output determines how to choose the exact
definition of the end date.
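Example (hypothetical usage):
start_date,end_date = sun.simulation_period(end_is_last_output=True)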
"""
start_fn = self.file_path('StartFile',0)
store_fn = self.file_path('StoreFile',0)
step_data = self.step_data()
if os.path.lexists(start_fn):
if os.path.exists(store_fn):
# I don't remember the exact reason that it's better to
# use storefiles, except that when runs are moved around,
# storefiles are more likely to still exist, while links to
# startfiles get broken. But it's possible that the storefile
# is empty, it's not a restart, and we'd have been better off
# to read the startfile instead.
# From a storefile and step_data, can work back to get
# starting time
sf = StoreFile(self,processor=0)
last_output_date = sf.time()
grid_outputs = step_data['steps_output']
# note that this is duration from first to last output.
run_duration = self.conf.timestep() * int(self.conf['ntout']) * (grid_outputs-1)
start_date = last_output_date - run_duration
elif os.path.exists(start_fn):
# So it's presumably restart:
start_date,end_date = self.conf.simulation_period()
if self.starting_time_step()==0:
raise Exception("%s looks like a restart, but can't find Start or Store file"%self.datadir)
restart_offset = self.starting_time_step() * self.conf.timestep()
start_date += restart_offset
else:
raise Exception("Looks like a restart, but store and start files are missing")
else:
# presumably not a restart, and the configured period is what we want.
start_date,end_date = self.conf.simulation_period()
nsteps = int(self.conf['nsteps'])
ntout = int(self.conf['ntout'])
if end_is_last_output:
# round down to integer number of ntout periods:
duration = self.conf.timestep() * ntout * (nsteps//ntout)
else:
duration = self.conf.timestep() * nsteps
end_date = start_date + duration
return start_date,end_date
def starting_time_step(self):
if self._starting_time_step is None:
start_file = self.file_path('StartFile',0)
if os.path.exists(start_file):
# print "This is a restart."
fp = open(start_file,'rb')
x = fromstring(fp.read(4),int32)
fp.close()
self._starting_time_step = x[0]
else:
self._starting_time_step = 0
return self._starting_time_step
def parent_datadir(self):
""" If this is a restart, return the datadir of the original
(if possible - this requires that StartFile is a symlink!)
if this is a restart, but the parent run can't be found, returns
-1.
if it's not a restart, return False
"""
start_file = self.file_path('StartFile',0)
if os.path.exists(start_file):
if os.path.islink(start_file):
parent_dir = os.path.dirname( os.path.realpath( start_file ) )
if parent_dir == self.datadir:
# shouldn't ever happen, but occasionally runs are corrupted like this.
return False
else:
return parent_dir
else:
print(" It's a restart, but no symlink")
return -1
else:
return False
_restarts = None
def chain_restarts(self,max_count=None):
""" returns something that can be iterated over to get
sunreader instances ending with this
one. only goes back as far as the symlink trail of startfiles will
allow
the last one will be self - not a copy of self.
"""
if self._restarts is None:
suns = []
sun = self
while sun is not None and (max_count is None or len(suns) < max_count):
suns.insert(0,sun)
parent_dir = sun.parent_datadir()
# print "Checking parent_dir",parent_dir
if isinstance(parent_dir,str):
sun = SunReader(parent_dir)
else:
sun = None
print("Found %d chained restarts"%len(suns))
if max_count is None: # only cache this when we got all of them
self._restarts = suns
else:
return suns
if max_count is not None and len(self._restarts) > max_count:
return self._restarts[-max_count:]
else:
return self._restarts
def effective_time_zero(self):
""" return datetime for the when t=0, adjusted to be consistent with
possible changes in dt between restarts.
"""
suns = self.chain_restarts()[:-1]
if len(suns) > 0:
# The step that each one ended on
ending_steps = array( [StoreFile(s,0).timestep() for s in suns] )
num_steps = ending_steps.copy()
# here we assume that first that we got back (which may *not* be the
# actual first simulation - we may have lost the trail of symlinks)
# has a timestep representative of all runs up to that point
num_steps[1:] -= num_steps[:-1]
# And the possibly varying dt
dts = array( [s.conf_float('dt') for s in suns] )
sim_seconds = num_steps*dts
total_past_seconds = sum(sim_seconds)
# This should be the real time of our start
t = suns[0].time_zero() + datetime.timedelta( total_past_seconds/(24.*3600.) )
t0 = t - datetime.timedelta( self.starting_time_step()*self.conf_float('dt') / (24.*3600) )
else:
t0 = self.time_zero()
return t0
def bathymetry_offset(self):
""" a bit hackish - a constant offset is subtracted from all NAVD88 bathymetry
to ensure that there aren't issues with the way that suntans reads bathymetry
as absolute value.
this code peeks into the bathymetry processing code to intuit what offset was
added to the bathymetry
"""
return read_bathymetry_offset()
# Datum helpers
def srs_text(self):
return "EPSG:26910"
def srs(self):
proj = osr.SpatialReference()
proj.SetFromUserInput(self.srs_text())
return proj
def xform_suntans_to_nad83(self):
nad83_proj = osr.SpatialReference()
nad83_proj.SetFromUserInput('NAD83')
xform = osr.CoordinateTransformation(self.srs(),nad83_proj)
return xform
def xform_nad83_to_suntans(self):
nad83_proj = osr.SpatialReference()
nad83_proj.SetFromUserInput('NAD83')
xform = osr.CoordinateTransformation(nad83_proj,self.srs())
return xform
def xform_suntans_to_wgs84(self):
wgs84_proj = osr.SpatialReference()
wgs84_proj.SetFromUserInput('WGS84')
xform = osr.CoordinateTransformation(self.srs(),wgs84_proj)
return xform
def xform_wgs84_to_suntans(self):
wgs84_proj = osr.SpatialReference()
wgs84_proj.SetFromUserInput('WGS84')
xform = osr.CoordinateTransformation(wgs84_proj,self.srs())
return xform
def mllw_to_navd88(self,utm_locs):
return self.vdatum_for_utm('MLLW','NAVD88',utm_locs)
def msl_to_navd88(self,utm_locs):
return self.vdatum_for_utm('LMSL','NAVD88',utm_locs)
def vdatum_for_utm(self,src_vdatum,dest_vdatum,utm_locs):
""" given a vector of utm xy pairs, return the height that must be added to go from
the first vertical datum to the second.
"""
lonlat_locs = zeros( utm_locs.shape, float64 )
xform = self.xform_suntans_to_nad83()
for i in range(utm_locs.shape[0]):
lon,lat,dummy = xform.TransformPoint(utm_locs[i,0],utm_locs[i,1])
lonlat_locs[i,:] = [lon,lat]
return apply_vdatum(src_vdatum,dest_vdatum,lonlat_locs)
def plot_bathymetry(self,procs=None,ufunction=None,**kwargs):
def f(proc):
bath,gr = self.read_bathymetry(proc)
if ufunction:
bath = ufunction(bath)
return bath
return self.plot_scalar(f,procs,**kwargs)
def plot_edge_vector(self,u,proc,offset=0.0,**kwargs):
""" quiver plot on edges. u is a scalar, and the vector will be
constructed using the edge normals
offset shifts the origin along the edge to facilitate
multiple vectors on one edge
kwargs passed on to quiver.
"""
if u.ndim != 1:
raise Exception("velocity vector has shape %s - should be 1-D"%str(u.shape))
# g = self.grid(proc)
edata = self.edgedata(proc)
vec_u = u[:,newaxis] * edata[:,2:4]
vec_origin = edata[:,4:6] + offset*(edata[:,0])[:,newaxis] * (edata[:,3:1:-1] * [-1,1])
# slide origin over so we can put multiple arrows on
# the same edge:
# g.plot()
quiver(vec_origin[:,0],
vec_origin[:,1],
vec_u[:,0],
vec_u[:,1],
**kwargs)
def plot_scalar(self,scalar_for_proc,procs=None,clip=None,cmap=None,vmin=None,vmax=None):
""" takes care of setting all regions to the same
normalization scale. nan valued cells will be skipped
"""
if procs is None:
procs = range(self.num_processors())
pdatas = []
clim = [ inf,-inf]
for proc in procs:
gr = self.grid(proc)
scalar = scalar_for_proc(proc)
pdata = gr.plot_scalar( scalar, clip=clip,cmap=cmap )
if pdata:
valid = isfinite(scalar)
if any(valid):
this_clim = scalar[valid].min(), scalar[valid].max()
if this_clim[0] < clim[0]:
clim[0] = this_clim[0]
if this_clim[1] > clim[1]:
clim[1] = this_clim[1]
pdatas.append( pdata )
if vmin is not None:
clim[0] = vmin
if vmax is not None:
clim[1] = vmax
for p in pdatas:
p.set_clim( *clim )
return pdatas[0]
def count_3d_cells(self):
count = 0
for p in range(self.num_processors()):
cdata = self.celldata(p)
ng = self.proc_nonghost_cells(p)
count += cdata[ng,4].sum()
return count
def celldata(self,proc):
# 0,1: x,y voronoi center
# 2: area
# 3: depth at voronoi center
# 4: Nk - number of levels
# 5-7: edge indexes
# 8-10: cell neighbors
# 11-13: dot(cell-outware,edge-nx/ny)
# 14-16: distances from edges to voronoi center
if self._shared_grid:
return self._shared_grid.celldata(proc)
if proc not in self._celldata:
Ncells = self.grid(proc).Ncells()
celldata_name = self.file_path("celldata",proc)
fp = open(celldata_name,"rb")
cdata = []
for i in range(Ncells):
cdata.append( list( [float(s) for s in fp.readline().split() ] ) )
cdata = array(cdata)
self._celldata[proc] = cdata
return self._celldata[proc]
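# Example (hypothetical usage of the celldata columns documented above):
#   cdata = sun.celldata(proc)
#   Ac = cdata[:,2]                  # cell areas
#   depth = cdata[:,3]               # depth at voronoi centers (positive down)
#   Nk = cdata[:,4].astype(int32)    # number of active z-levels per cell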
def write_celldata(self,processor):
f = self.file_path('celldata',processor)
fp = open(f,'wt')
g = self.grid(processor)
Nc = g.Ncells()
cdata = self.celldata(processor)
for i in range(Nc):
fp.write("%.6f %.6f %.6f %.6f %d %d %d %d %d %d %d %d %d %d %.6f %.6f %.6f\n"%tuple(cdata[i,:]))
fp.close()
def Nk(self,proc):
return self.celldata(proc)[:,4].astype(int32)
def Nke(self,proc):
return self.edgedata(proc)[:,6].astype(int32)
def Nkc(self,proc):
return self.edgedata(proc)[:,7].astype(int32)
def edge_bathymetry(self,proc):
""" If edge depths are defined separately, return an
xyz array of them for the given processor. Otherwise,
return None
"""
fn = self.file_path('edgedepths',proc)
if os.path.exists(fn):
return loadtxt(fn)
def read_bathymetry(self,proc):
gr = self.grid(proc)
return self.celldata(proc)[:,3],gr
def depth_for_cell(self,cell_id,proc):
bath,gr = self.read_bathymetry(proc)
return bath[cell_id]
dzmin=0.001 # someday could read this from a file...
_dz = None
def dz(self):
if self._dz is None:
vsp = open(self.file_path('vertspace'),'rt')
self._dz = array(list(map(float,vsp.read().split())))
return self._dz
def z_levels(self):
""" returns bottoms of the z-levels, but as *soundings*, and not
including z=0.
"""
if self._z_levels is None:
dz_list = self.dz()
self._z_levels = cumsum(dz_list)
return self._z_levels
def primary_boundary_datasource(self):
""" return the datasource that forces the largest number of edges.
This is a hack to find the datasource that forces the freesurface on
the ocean boundary.
"""
best_fg = None
for proc in range(self.num_processors()):
f = forcing.read_boundaries_dat(self,proc)
for fg in f.forcing_groups:
if best_fg is None or len(best_fg.edges) < len(fg.edges):
best_fg = fg
ds_index = best_fg.hydro_datasource()
return best_fg.gforce.datasources[ds_index]
def boundary_forcing(self,proc=None):
""" if proc is not given, return forcing from the first processor that has
some forced cells
this could probably get factored into boundary_inputs:BoundaryWriter
"""
if proc is None:
for proc in range(self.num_processors()):
f = forcing.read_boundaries_dat(self,proc)
if f.has_forced_edges():
return f
else:
return forcing.read_boundaries_dat(self,proc)
def topo_edges(self):
if self._topo_edges is None:
self._topo_edges = []
for p in range(self.num_processors()):
fp = open(self.file_path('topology',p),'rt')
def tokgen():
while 1:
buff = fp.readline()
if buff == "":
return
for t in buff.split():
yield int(t)
tok = tokgen()
nprocs = next(tok)
nneighs = next(tok)
for n in range(nneighs):
neigh = next(tok)
if p < neigh:
self._topo_edges.append( (p,neigh) )
return self._topo_edges
def show_topology(self,procs_per_node=4,topo_edges=None):
if topo_edges is None:
topo_edges = self.topo_edges()
# load the graph:
# the graph is stored just as a set of edges, with
# the processors numbered 0-<nprocs-1>
cla()
# graph the processor connectivity graph:
# round up:
n_nodes = 1 + (self.num_processors()-1)//procs_per_node
nodes = 2*arange(n_nodes) # space nodes out twice as much as cores
cores = arange(procs_per_node)
x,y = meshgrid(cores,nodes)
# I want an array that maps proc_number to an xy pair
proc_locs = transpose( array( (x.ravel(),y.ravel()), float64 ))
# and randomly perturb so we can see all the lines:
proc_locs[:,:1] = proc_locs[:,:1] + 0.4*(random( proc_locs[:,:1].shape ) - 0.5)
# now proc 0-3 are on a line, 4-7, etc.
for i in range(self.num_processors()):
pylab.annotate( "%i"%i, proc_locs[i] )
pylab.plot(proc_locs[:,0],proc_locs[:,1],'ro')
for e in topo_edges:
locs = proc_locs[array(e),:]
pylab.plot( locs[:,0],locs[:,1],'b-' )
pylab.axis('equal')
x1,x2,y1,y2 = pylab.axis()
x1 = x1 - 0.05*(x2 - x1)
x2 = x2 + 0.05*(x2 - x1)
y1 = y1 - 0.05*(y2 - y1)
y2 = y2 + 0.05*(y2 - y1)
pylab.axis( [x1,x2,y1,y2] )
def remap_processors(self,procs_per_node=4,do_plot=False):
import pymetis
# create the adjacency graph in the way that
# pymetis likes it
adj = [None]*self.num_processors()
topo_edges = self.topo_edges()
for a,b in topo_edges:
if adj[a] is None:
adj[a] = []
if adj[b] is None:
adj[b] = []
adj[a].append(b)
adj[b].append(a)
n_nodes = 1 + (self.num_processors() - 1)//procs_per_node
cuts,parts = pymetis.part_graph(n_nodes,adjacency=adj)
print(parts)
# create a mapping of old proc nunmber to new proc number
#parts = array(parts)
mapping = -1*ones(self.num_processors())
# mapping[i] gives the new processor number for proc i
count_per_node = zeros(n_nodes)
for i in range(len(parts)):
# old proc i
my_node = parts[i]
new_proc = my_node * procs_per_node + count_per_node[my_node]
mapping[i] = new_proc
count_per_node[my_node]+=1
# now create a new topo-edges array so we can graph this...
new_topo_edges = mapping[array(self.topo_edges())]
new_topo_edges = new_topo_edges.astype(int32)
if do_plot:
pylab.clf()
pylab.subplot(121)
self.show_topology()
pylab.subplot(122)
self.show_topology(topo_edges=new_topo_edges)
def parse_output(self,output_name=None):
"""
reads the output from a run, hopefully with at least
-vv verbosity.
If the run crashed, sets self.crash to a crash object
Sets self.status to one of 'done','crash','running'
this is a work in progress (but you knew that, right?)
"""
if output_name is None:
output_name = os.path.join(self.datadir,'output')
run_output = open(output_name)
self.status = 'running'
self.crash = None
while 1:
l = run_output.readline()
if not l:
break
if l.find('Run is blowing up!') >= 0:
self.status = 'crash'
m = re.match(r'Time step (\d+): Processor (\d+), Run is blowing up!',l)
if not m:
print("Failed to match against")
print(l)
else:
# got a crash
crash = SuntansCrash()
crash.sun = self
crash.step = int(m.group(1))
crash.processor = int(m.group(2))
l = run_output.readline()
for i in range(100): # search for CFL details up to 100 lines away
### Vertical Courant number:
m = re.match(r'Courant number problems at \((\d+),(\d+)\), Wmax=([-0-9\.]+), dz=([0-9\.]+) CmaxW=([0-9\.]+) > ([0-9\.]+)',
l)
if m:
crash.cell_id = int(m.group(1))
crash.z_id = int(m.group(2))
crash.w_max = float(m.group(3))
crash.dz = float(m.group(4))
crash.cmax_w = float(m.group(5))
crash.cmax_w_lim = float(m.group(6))
crash.description = SuntansCrash.vertical_courant
break
### Horizontal Courant number:
m = re.match(r'Courant number problems at \((\d+),(\d+)\), Umax=([-0-9\.]+), dx=([0-9\.]+) CmaxU=([0-9\.]+) > ([0-9\.]+)',
l)
if m:
crash.edge_id = int(m.group(1))
crash.z_id = int(m.group(2))
crash.u_max = float(m.group(3))
crash.dx = float(m.group(4))
crash.cmax_u = float(m.group(5))
crash.cmax_u_lim = float(m.group(6))
crash.description = SuntansCrash.horizontal_courant
break
print("Hmm - maybe this isn't a vertical courant number issue")
l = run_output.readline()
self.crash = crash
break
def write_bov(self,label,proc,dims,data):
"""
Write a binary file that will hopefully be readable by
visit through some naming conventions, and can be read
back into sunreader.
label: a name containing no spaces or dashes that describes
what the data is (e.g. m2_fs_amp )
dims: a list identifying the dimensions in order.
[z_level, cell, time_step]
data: an array that matches the described dimensions.
Currently there are only two grids defined in visit:
2D cells
3D cells, with z-level
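Example (hypothetical; mirrors the calls made by harm_decomposition below):
sun.write_bov('m2_fs_amp',proc=proc,dims=['cell'],data=amps[:,0])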
"""
# Enforce this ordering on the dimensions, which comes from the
# ordering of dimensions in suntans scalar output
required_order = ['time_step','z_level','cell']
given_order = [required_order.index(s) for s in dims]
if sorted(given_order) != given_order:
raise Exception("Order of dimensions must be time_step, cell, z_level")
# Enforce the expected size of each dimension:
g = self.grid(proc)
for i in range(len(dims)):
if dims[i] == 'time_step':
print("Assuming that number of timesteps is okay")
elif dims[i] == 'cell' and data.shape[i] != g.Ncells():
print("WARNING: cell dimension - data shape is %i but grid reports %i"%(data.shape[i],g.Ncells()))
elif dims[i] == 'z_level' and data.shape[i] != self.conf_int('Nkmax'):
print("WARNING: z_level dimension - data shape is %i but Nkmax is %i"%(
data.shape[i],self.conf_int('Nkmax')))
if data.dtype != float64:
print("Converting to 64-bit floats")
data = data.astype(float64)
formatted_name = os.path.join(self.datadir,label + "-" + "-".join(dims) + ".raw.%i"%proc)
print("Writing to %s"%formatted_name)
fp = open(formatted_name,'wb')
fp.write(data.tostring())
fp.close()
def harm_decomposition(self,consts=['constant','M2'],ref_data=None,phase_units='minutes',
skip=0.5):
""" Perform a harmonic decomposition on the freesurface, using
the given constituents, and write the results to
<const name>_<phase or amp>-cell.raw.<proc>
Phase is relative to cos(t), t in simulation time.
At some point ref_data may be used to specify a timeseries that can also
be decomposed, and whose amp/phase will be used as a reference for normalizing
the others...
or set ref_data='forcing' to take the reference to be the forcing on the first forced
cell (i.e. it will loop over processors, and take the first cell with forcing data)
"""
import harm_field
if ref_data == 'forcing':
# this matches the timeline used in harm_field:
# times of freesurface output, using the second half of the run.
# for forcing it would be okay to use the entire run, but I
# think it's more consistent to decompose the forcing at the
# same times as the cell values
t = self.timeline()[self.steps_available()//2:]
forcing = None
for proc in range(self.num_processors()):
forcing = self.boundary_forcing(proc)
if forcing.n_bcells > 0:
print("Getting forcing data for boundary cell 0, processor %s"%proc)
ref_data = forcing.calc_forcing(times=t,units='seconds')
break
if forcing is None:
raise Exception("No forced boundary cells were found")
if ref_data:
ref_t,ref_vals = ref_data
# need to get the right omegas, a bit kludgy.
import harm_plot,harm_decomp
hplot = harm_plot.HarmonicPlot(sun=self,consts=consts)
my_omegas = hplot.omegas
print("Calculating decomposition for forcing data")
ref_comps = harm_decomp.decompose(ref_t,ref_vals,my_omegas)
else:
ref_comps = None
for proc in range(self.num_processors()):
print("Decomposition for processor %i"%proc)
harm_field
hplot = harm_field.HarmonicField(self,proc=proc,consts=consts,skip=skip)
print(" Calculating decomposition")
amps,phase = hplot.calc_harmonics()
if ref_comps is not None:
amps = amps/ref_comps[:,0]
phase = phase - ref_comps[:,1]
for i in range(len(consts)):
print(" Writing %s"%consts[i])
self.write_bov('%s_amp'%consts[i],proc=proc,dims=['cell'],data=amps[:,i])
phase_data = phase[:,i]
if phase_units == 'radians':
pass
elif phase_units == 'degrees':
phase_data *= (180./pi)
elif phase_units == 'minutes':
omega = hplot.omegas[i]
if omega > 0.0:
phase_data *= 1.0 / (60.0*omega)
self.write_bov('%s_phase'%consts[i],proc=proc,dims=['cell'],data=phase_data)
def edgedata(self,processor):
# 0: edge length
# 1: dg, voronoi length
# 2,3: nx, ny, edge normal
# 4,5: x,y position of center
# 6: Nke
# 7: Nkc
# 8: cell nc1 - [nx,ny] points *toward* this cell.
# 9: cell nc2 - upwind cell for positive face velocity
# 10,11: gradf - face number for nc1, nc2
# 12: marker
# 13,14: point indexes
if self._shared_grid:
return self._shared_grid.edgedata(processor)
if processor not in self._edgedata:
g = self.grid(processor)
Ne = g.Nedges()
f = self.file_path('edgedata',processor)
fp = open(f,'rt')
edgedata = zeros([Ne,15],float64)
for i in range(Ne):
edgedata[i,:] = list(map(float,fp.readline().split()))
self._edgedata[processor] = edgedata
return self._edgedata[processor]
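# Example (hypothetical usage of the edgedata columns documented above):
#   edata = sun.edgedata(proc)
#   df = edata[:,0]                              # edge lengths
#   normals = edata[:,2:4]                       # (nx,ny), pointing toward cell nc1
#   nc1,nc2 = edata[:,8].astype(int32), edata[:,9].astype(int32)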
def write_edgedata(self,processor):
f = self.file_path('edgedata',processor)
fp = open(f,'wt')
g = self.grid(processor)
Ne = g.Nedges()
edata = self.edgedata(processor)
for i in range(Ne):
fp.write("%f %f %f %f %f %f %d %d %d %d %d %d %d %d %d\n"%tuple(edata[i,:]))
fp.close()
def write_random_dataxy(self,fraction=0.05):
""" randomly choose the given fraction of cells, and write a set of profile
points (dataxy.dat) accordingly.
"""
# refuse to overwrite an existing one:
dataxy_path = self.file_path('DataLocations')
if os.path.exists(dataxy_path):
raise Exception("Please remove existing profile locations file %s first"%dataxy_path)
fp = open(dataxy_path,'wt')
for proc in range(self.num_processors()):
print("choosing cell centers from processor %i"%proc)
g = self.grid(proc)
sampling = random( g.Ncells() )
chosen = find( sampling < fraction )
chosen_points = g.vcenters()[chosen]
for winner in chosen_points:
fp.write( "%g %g\n"%(int(winner[0]),int(winner[1])) )
fp.close()
def write_initial_salinity(self,func,dimensions=3,func_by_index=0):
""" write salinity initialization for all processors based on the
values returned by func
dimensions=3: full field, func = func(x,y,z)
dimensions=1: vertical profile, func = func(z)
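Example (hypothetical): salinity increasing with depth; per write_initial_cond,
z is the layer mid-depth as a positive sounding:
sun.write_initial_salinity(lambda x,y,z: 30.0 + 0.05*z)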
"""
self.write_initial_cond(func,'InitSalinityFile',dimensions,func_by_index=func_by_index)
def write_initial_temperature(self,func,dimensions=3,func_by_index=0):
self.write_initial_cond(func,'InitTemperatureFile',dimensions,func_by_index=func_by_index)
def copy_scalar_to_initial_cond(self,sun_in,
scalar_file_in='SalinityFile',
scalar_file_out='InitSalinityFile',
step=-1):
""" For copying the output of one run to the initialization of another.
Right now, the grids must be identical, including the subdomain decomposition
which makes this method pretty much useless since suntans can just do a restart
sun_in: a sun instance for the last run
scalar_file_in: which suntans.dat field has the filename for reading the
last run's scalar
scalar_file_out: which suntans.dat field has the filename for the scalar the
we are writing out
step: number of the step to read from the last run. negative means count
back from the end.
"""
# First, some sanity checks:
if self.num_processors() != sun_in.num_processors():
raise Exception("Mismatch in number of processors: %d vs %d"%(self.num_processors(),
sun_in.num_processors()))
for proc in range(self.num_processors()):
fname_in = sun_in.file_path(scalar_file_in,proc)
fname_out = self.file_path(scalar_file_out,proc)
print("Transcribing scalar from %s to %s"%(fname_in,fname_out))
# Read the old data:
g_in,scalar_in = sun_in.cell_scalar(scalar_file_in,proc,step)
g_out = self.grid(proc)
# more sanity checks:
if any(g_in.cells != g_out.cells):
raise Exception("Cell arrays don't match!")
Nc = self.grid(proc).Ncells()
Nk = self.Nk(proc)
column_starts = Nk.cumsum() - Nk
scalar = zeros( Nk.sum(), REALTYPE )
for i in range(Nc):
for k in range(Nk[i]):
# not positive about the ordering of indices for scalar_in
scalar[column_starts[i] + k] = scalar_in[i,k]
fp = open(fname_out,'wb')
fp.write( scalar.tostring() )
fp.close()
def write_initial_cond(self,func,scalar_file='InitSalinityFile',dimensions=3,func_by_index=0):
""" need to interpret func_by_index - if true, then the function actually takes
a cell and k, and returns the value, not by physical coordinates.
"""
if not func_by_index:
# vertical locations
nkmax = self.conf_int('Nkmax')
z_levels = concatenate( ([0],self.z_levels()) )
mid_elevations = 0.5*(z_levels[1:] + z_levels[:-1])
if dimensions == 3:
for proc in range(self.num_processors()):
fname = self.file_path(scalar_file,proc)
print("Writing initial condition to %s"%fname)
g = self.grid(proc)
if not func_by_index:
# these are the xy locations
centers = g.vcenters()
Nc = self.grid(proc).Ncells()
Nk = self.Nk(proc)
column_starts = Nk.cumsum() - Nk
scalar = zeros( Nk.sum(), REALTYPE )
for i in range(Nc):
for k in range(Nk[i]):
if func_by_index:
scalar[column_starts[i] + k] = func(proc,i,k)
else:
scalar[column_starts[i] + k] = func(centers[i,0],centers[i,1],mid_elevations[k])
fp = open(fname,'wb')
fp.write( scalar.tostring() )
fp.close()
else: # dimensions == 1
fname = self.file_path(scalar_file)
scalar = zeros( nkmax, REALTYPE)
for k in range(nkmax):
scalar[k] = func(mid_elevations[k])
fp = open(fname,'wb')
fp.write( scalar.tostring() )
fp.close()
def read_cell_scalar_log(self,label,proc=None,time_step=None):
""" a bit of a hack - this is for reading data logged by the logger.c functions.
needs to be abstracted out so the same can be used for reading scalar initialization,
or even more general where we can specify whether all z-levels or only valid z-levels
are in the file (then it could be used for reading scalar output, too)
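Example (hypothetical; 'diagnostic' is an illustrative label used with logger.c):
vals = sun.read_cell_scalar_log('diagnostic',proc=0,time_step=10)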
"""
if time_step is not None:
fname = '%s-time_step-cell.raw'%label
else:
fname = '%s-cell.raw'%label
fname = os.path.join(self.datadir,fname)
if proc is not None:
fname += '.%d'%proc
fp = open(fname,'rb')
Nc = self.grid(proc).Ncells()
real_size = 8
time_stride = real_size * Nc
if time_step:
fp.seek(time_stride*time_step)
cell_scalar = fromstring(fp.read(time_stride),float64)
return cell_scalar
def read_edge_scalar(self,label,proc=None,time_step=None):
""" if time_step is None, assume it's not time varying.
if time_step is 'all', then read all timesteps.
otherwise, read a single timestep as specified
"""
if time_step is not None:
fname = '%s-time_step-edge.raw'%label
else:
fname = '%s-edge.raw'%label
if proc is not None:
fname += '.%d'%proc
Ne = self.grid(proc).Nedges()
real_size = 8
full_fname = os.path.join(self.datadir,fname)
if time_step == 'all': # try memory mapping:
frame_size = Ne * REALSIZE
nbytes = os.stat(full_fname).st_size
data_shape = (nbytes//frame_size,Ne)
edge_scalar = memmap(full_fname, dtype=REALTYPE, mode='r', shape=data_shape)
else:
fp = open(full_fname,'rb')
time_stride = real_size * Ne
if time_step:
fp.seek(time_stride*time_step)
edge_scalar = fromstring(fp.read(time_stride),float64)
fp.close()
return edge_scalar
def read_edge_z_level_scalar(self,label,proc=None,time_step=None):
if time_step is not None:
fname = '%s-time_step-edge-z_level.raw'%label
else:
fname = '%s-edge-z_level.raw'%label
if proc is not None:
fname += '.%d'%proc
fp = open(os.path.join(self.datadir,fname),'rb')
Nkc = self.Nkc(proc)
Ne = self.grid(proc).Nedges()
real_size = 8
time_stride = real_size * Nkc.sum()
if time_step:
fp.seek(time_stride*time_step)
raw_scalar = fromstring(fp.read(time_stride),float64)
# rewrite scalar in a nice x[j][k] sort of way. Note that this means
# the [j] array is a list, not a numpy array:
column_starts = Nkc.cumsum() - Nkc
edgescalar = [raw_scalar[column_starts[i]:column_starts[i]+Nkc[i]] for i in range(Ne)]
return edgescalar
def read_cell_z_level_scalar(self,label=None,fname=None,proc=None,time_step=None):
""" if fname is None, determines a file name based on the label and the structure
of the data. Note that if fname is specified, proc must still be specified as needed
but it will *not* be appended to the filename
"""
if fname is None:
if time_step is not None:
fname = '%s-time_step-cell-z_level.raw'%label
else:
fname = '%s-cell-z_level.raw'%label
if proc is not None:
fname += '.%d'%proc
fp = open(os.path.join(self.datadir,fname),'rb')
Nk = self.Nk(proc)
Nc = self.grid(proc).Ncells()
real_size = 8
time_stride = real_size * Nk.sum()
if time_step:
fp.seek(time_stride*time_step)
raw_scalar = fromstring(fp.read(time_stride),float64)
# rewrite scalar in a nice x[j][k] sort of way. Note that this means
# the [j] array is a list, not a numpy array:
column_starts = Nk.cumsum() - Nk
cellscalar = [raw_scalar[column_starts[i]:column_starts[i]+Nk[i]] for i in range(Nc)]
return cellscalar
def closest_profile_point(self,p):
pnts = self.profile_points()
dists = sqrt( (pnts[:,0]-p[0])**2 + (pnts[:,1]-p[1])**2 )
return argmin(dists)
def closest_cell(self,xy,full=0):
""" Return proc,cell_id for the closest cell to the given point, across
all processors. Slow...
full==0: each subdomain will only consider cells that contain the closest global
point. as long as all points are part of a cell, this should be fine.
full==1: if the closest point isn't in a cell, consider *all* cells.
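Example (hypothetical usage; xy is an [easting,northing] pair in the model's UTM coordinates):
proc,c = sun.closest_cell(xy)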
"""
ids = []
dists = []
# Read the full grid once, build its index.
#print "Closest cell - reading full grid"
gfull = self.grid()
#print "building its index"
gfull.build_index()
i = gfull.closest_point(xy)
#print "Got closest index %d"%i
c_list = [] # a list of cell ids, one per processor
dist_list = [] # the distances, same deal
for p in range(self.num_processors()):
#print "Checking on processor %i"%p
#print "Reading its grid"
g = self.grid(p)
# c_p = g.closest_cell(xy,full=full)
## this is basically the body of trigrid::closest_cell()
try:
cells = list( g.pnt2cells(i) )
except KeyError:
if not full:
# note that this processor didn't have a good match
c_list.append(-1)
dist_list.append(inf)
continue
else:
print("This must be on a subdomain. The best point wasn't in one of our cells")
cells = list(range(g.Ncells()))
# found some candidate cells -
# choose based on distance to vcenter
cell_centers = g.vcenters()[cells]
dists = ((xy-cell_centers)**2).sum(axis=1)
# closest cell on proc p
chosen = cells[argmin(dists)]
c_list.append( chosen )
dist_list.append( dists.min() )
#
dist_list = array(dist_list)
best_p = argmin(dist_list)
best_c = c_list[best_p]
return best_p,best_c
def grid_stats(self):
""" Print a short summary of grid information
This needs to be run on a domain that has already been decomposed
into subdomains (such that celldata.dat.* files exist)
"""
procs = list(range(self.num_processors()))
twoD_cells = 0
threeD_cells = 0
for p in procs:
print("...Reading subdomain %d"%p)
g=self.grid(p)
nonghost = self.proc_nonghost_cells(p)
twoD_cells += len(nonghost)
threeD_cells += sum(self.Nk(p)[nonghost])
print("Total 2D cells: %d"%twoD_cells)
print("Total 3D cells: %d"%threeD_cells)
# Some distance metrics:
dg_per_proc = []
for p in procs:
print("...Reading subdomain %d"%p)
edgedata = self.edgedata(p)
valid = (edgedata[:,12] == 0) | ((edgedata[:,12]==5) & (edgedata[:,8]>=0))
dg_per_proc.append( edgedata[valid,1] )
dg_total = concatenate(dg_per_proc)
print("Spacing between adjacent voronoi centers [m]:")
print(" Mean=%f Median=%f Min=%f Max=%f"%(mean(dg_total),median(dg_total),
dg_total.min(), dg_total.max()))
def calc_Cvol_distribution(self):
""" from storefile, calculate some simple statistics for the distribution
of volumetric courant number. prints them out and returns a list of
[ ('name',value), ... ]
"""
# Choose a full-grid output to analyze - since we need the storefile
# as well, we pretty much have to choose the last full grid output
n = self.steps_available() - 1
dt = self.conf_float('dt')
all_Cvol = [] # will append single Cvol numbers
all_df = [] # will append arrays for valid edges
all_dg = [] # same
dz = self.dz()
dzmin_surface = 2*self.dzmin # the dzmin cutoff for merging a surface cell with the one below it.
z = -self.z_levels() # elevation of the bottom of each z level
for proc in range(self.num_processors()):
print("Proc %d"%proc)
cdata = self.celldata(proc)
edata = self.edgedata(proc)
Ac = cdata[:,2]
bed = -cdata[:,3]
valid_edge = (edata[:,12] == 0) | ( (edata[:,12] == 5) & (edata[:,8]>=0))
all_df.append( edata[valid_edge,0] )
all_dg.append( edata[valid_edge,1] )
store = StoreFile(self,proc)
h = store.freesurface()
u = store.u()
# calculate dz, dzf
g = self.grid(proc)
ctops = self.h_to_ctop(h,dzmin_surface)
etops = zeros( g.Nedges(), int32 )
h_edge= zeros( g.Nedges(), float64 )
bed_edge = zeros( g.Nedges(), float64 )
Nke = edata[:,6].astype(int32)
dzf = [None] * g.Nedges()
for j in range( g.Nedges() ):
# we don't have exact information here, but assume that the top, non-zero velocity
# in u corresponds to the top edge.
nc1,nc2 = edata[j,8], edata[j,9]
if nc1 < 0:
nc = nc2
bed_edge[j] = bed[nc2]
elif nc2 < 0:
nc = nc1
bed_edge[j] = bed[nc1]
else:
bed_edge[j] = max( bed[nc1], bed[nc2] )
nz = nonzero( u[j] )[0]
if len(nz) > 0:
u_for_upwind = u[j][nz[0]]
if u_for_upwind > 0:
nc = nc2
else:
nc = nc1
else:
if h[nc1] > h[nc2]:
nc = nc1
else:
nc = nc2
etops[j] = ctops[nc]
h_edge[j] = h[nc]
# fill in dzf:
this_dzf = dz[:Nke[j]].copy()
this_dzf[:etops[j]] = 0.0 # empty cells above fs
this_dzf[etops[j]] = h_edge[j] - z[etops[j]] # recalc surface cell
this_dzf[Nke[j]-1] -= bed_edge[j] - z[Nke[j]-1] # trim bed layer (which could be same as surface)
dzf[j] = this_dzf
# Now loop through cells, and get volumetric courant numbers:
for i in self.proc_nonghost_cells(proc): # range(g.Ncells()):
Nk = int(cdata[i,4])
dzc = dz[:Nk].copy()
dzc[:ctops[i]] = 0.0 # empty cells above fs
dzc[ctops[i]] = h[i] - z[ctops[i]] # recalc surface cell
dzc[Nk-1] -= bed[i] - z[Nk-1] # trim bed layer (which could be same as surface)
for k in range(ctops[i],Nk):
V = Ac[i] * dzc[k]
Q = 0
for j,normal in zip( cdata[i,5:8].astype(int32), cdata[i,11:14]):
df = edata[j,0]
if k == ctops[i]:
# surface cell gets flux from edges here and up
# (which may be below ctop, anyway)
kstart = etops[j]
else:
kstart = k
for kk in range(kstart,min(k+1,Nke[j])):
# so now kk is guaranteed to be valid layer for the edge j
if u[j][kk] * normal > 0: # it's an outflow
Q += u[j][kk] * normal * df * dzf[j][kk]
C = Q * dt / V
all_Cvol.append(C)
all_Cvol = array(all_Cvol)
all_df = concatenate( all_df )
all_dg = concatenate( all_dg )
# and calculate the effect of substepping to get an effective mean Cvol:
nsubsteps = ceil(all_Cvol.max())
dt = self.conf_float('dt')
sub_dt = dt/nsubsteps
mean_sub_Cvol = mean( all_Cvol/nsubsteps )
# print "Mean Cvol: %f"%all_Cvol.mean()
# print "Mean df: %f"%all_df.mean()
# print "Mean dg: %f"%all_dg.mean()
return [ ('Mean Cvol',all_Cvol.mean()),
('Mean df',all_df.mean()),
('Mean dg',all_dg.mean()),
('dt',dt),
('sub_dt',sub_dt),
('nsubsteps',nsubsteps),
('Mean subCvol',mean_sub_Cvol)]
def all_dz(self,proc,time_step):
""" Return a 2-D array of dz values all_dz[cell,k]
dry cells are set to 0, and the bed and freesurface height are
taken into account. Useful for depth-integrating.
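Example (hypothetical sketch; 'scal' is an illustrative [cell,z-level] array):
thickness = sun.all_dz(proc,time_step=n)
depth_integral = (thickness*scal).sum(axis=1)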
"""
cdata = self.celldata(proc)
Nk = cdata[:,4].astype(int32)
bed = -cdata[:,3]
all_dz = (self.dz())[newaxis,:].repeat(len(Nk),axis=0)
z = -self.z_levels()
h = self.freesurface(proc,[time_step])[0]
ctops = self.h_to_ctop(h)
for i in range(len(Nk)):
all_dz[i,:ctops[i]] = 0.0 # empty cells above fs
all_dz[i,Nk[i]:] = 0.0 # empty cells below bed
all_dz[i,ctops[i]] = h[i] - z[ctops[i]] # recalc surface cell
all_dz[i,Nk[i]-1] -= bed[i] - z[Nk[i]-1] # trim bed layer (which could be same as surface)
return all_dz
def averaging_weights(self,proc,time_step,ztop=None,zbottom=None,dz=None):
""" Returns weights as array [Nk] to average over a cell-centered quantity
for the range specified by ztop,zbottom, and dz.
range is specified by 2 of the 3 of ztop, zbottom, dz, all non-negative.
ztop: distance from freesurface
zbottom: distance from bed
dz: thickness
if the result would be an empty region, return nans.
this thing is slow! - lots of time in adjusting all_dz
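Example (hypothetical sketch): weights for the top 2 m of each water column:
w = sun.averaging_weights(proc,time_step=n,ztop=0,dz=2.0)
# multiply w against a [cell,z-level] scalar and sum over z-levels to average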
"""
cdata = self.celldata(proc)
Nk = cdata[:,4].astype(int32)
bed = -cdata[:,3]
one_dz = self.dz()
all_dz = one_dz[newaxis,:].repeat(len(cdata),axis=0)
all_k = arange(len(one_dz))[None,:].repeat(len(cdata),axis=0)
z = -self.z_levels()
h = self.freesurface(proc,[time_step])[0]
# adjust bed and
# 3 choices here..
# try to clip to reasonable values at the same time:
if ztop is not None:
if ztop != 0:
h = h - ztop # don't modify h
# don't allow h to go below the bed
h[ h<bed ] = bed
if dz is not None:
# don't allow bed to be below the real bed.
bed = maximum( h - dz, bed)
if zbottom is not None:
# no clipping checks for zbottom yet.
if zbottom != 0:
bed = bed + zbottom # don't modify bed!
if dz is not None:
h = bed + dz
# so now h and bed are elevations bounding the integration region
ctops = self.h_to_ctop(h)
# default h_to_ctop will use the dzmin appropriate for the surface,
# but at the bed, it goes the other way - safest just to say dzmin=0,
# and also clamp to known Nk
cbeds = self.h_to_ctop(bed,dzmin=0) + 1 # it's an exclusive index
cbeds[ cbeds > Nk ] = Nk[ cbeds > Nk ]
if 0: # non vectorized - takes 0.8s per processor on the test run
for i in range(len(Nk)):
all_dz[i,:ctops[i]] = 0.0 # empty cells above fs
all_dz[i,cbeds[i]:] = 0.0 # empty cells below bed - might be an error...
all_dz[i,ctops[i]] = h[i] - z[ctops[i]] # recalc surface cell
all_dz[i,cbeds[i]-1] -= bed[i] - z[cbeds[i]-1] # trim bed layer (which could be same as surface)
else: # attempt to vectorize - about 70x speedup
# seems that there is a problem with how dry cells are handled -
# for the exploratorium display this ending up with a number of cells with
# salinity close to 1e6.
# in the case of a dry cell, ctop==cbed==Nk[i]
#
drymask = (all_k < ctops[:,None]) | (all_k>=cbeds[:,None])
all_dz[drymask] = 0.0
ii = arange(len(cdata))
all_dz[ii,ctops] = h - z[ctops]
all_dz[ii,cbeds-1] -= bed - z[cbeds-1]
# make those weighted averages
# have to add extra axis to get broadcasting correct
all_dz = all_dz / sum(all_dz,axis=1)[:,None]
return all_dz
| mit |
thast/EOSC513 | DC/SparseGN/WVT_DB4_withoutW/WVT_DB4_withoutW.py | 1 | 10446 | from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import time
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 2**7-24,2**7-12
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordinates
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1,zLoc1]).T
topHalf = topHalf[0:-1,:]
bottomHalf = np.vstack([xLoc2,zLoc2]).T
bottomHalf = bottomHalf[0:-1,:]
cylinderPoints = np.vstack([topHalf,bottomHalf])
cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
return cylinderPoints
cylinderPoints0 = getCylinderPoints(x0,z1,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
dmis = DataMisfit.l2_DataMisfit(survey)
survey.dpred(mtrue)
survey.makeSyntheticData(mtrue,std=0.05,force=True)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
print '# of data: ', survey.dobs.shape
from SimPEG.Maps import IdentityMap
import pywt
class WaveletMap(IdentityMap):
def __init__(self, mesh=None, nP=None, **kwargs):
super(WaveletMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m, wv = 'db4'):
coeff_wv = pywt.wavedecn(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
def deriv(self, m, v=None, wv = 'db4'):
if v is not None:
coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
else:
print "not implemented"
def inverse(self, m, wv = 'db4'):
msyn = np.zeros(self.mesh.nC)
coeff_wv = pywt.wavedecn(msyn.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
coeff_back = pywt.array_to_coeffs(m.reshape(array_wv[0].shape, order = 'F'),array_wv[1])
coeff_m = pywt.waverecn(coeff_back,wv, mode = 'per')
return Utils.mkvc(coeff_m)
class iWaveletMap(IdentityMap):
def __init__(self, mesh, nP=None, **kwargs):
super(iWaveletMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m, wv = 'db4'):
msyn = np.zeros(self.mesh.nC)
coeff_map = pywt.wavedecn(msyn.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_map = pywt.coeffs_to_array(coeff_map)
coeff_map = pywt.array_to_coeffs(m.reshape(array_map[0].shape,order= 'F'),array_map[1])
coeff_back_map = pywt.waverecn(coeff_map,wv, mode = 'per')
return Utils.mkvc(coeff_back_map)
def deriv(self, m, v=None, wv = 'db4'):
if v is not None:
coeff_wv = pywt.wavedecn(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
coeff_back = pywt.array_to_coeffs(v,array_wv[1])
coeff_m = pywt.waverecn(coeff_back,wv, mode = 'per')
return Utils.mkvc(coeff_m)
else:
print "not implemented"
def inverse(self, m, wv = 'db4'):
coeff_wv = pywt.wavedecn(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'),wv, mode = 'per')
array_wv = pywt.coeffs_to_array(coeff_wv)
return Utils.mkvc(array_wv[0])
wavmap = WaveletMap(mesh)
iwavmap = iWaveletMap(mesh)
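#Sanity check (hypothetical sketch, not part of the original inversion): for this
#128x128 mesh and 'db4' with periodization, wavmap followed by iwavmap should
#reconstruct a model up to floating-point error:
#m_test = np.random.randn(mesh.nC)
#print np.allclose(iwavmap*(wavmap*m_test), m_test)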
import spgl1
#Parameter for SPGL1 iterations
nits = 10
mwav = (-5.)*np.ones_like(mtrue)
it = 0
phi_d_normal = np.load('../../NormalInversion/NormalInversion/phid_normal.npy')
ratio = np.r_[6.5,phi_d_normal[0:-1]/phi_d_normal[1:]]
#ratio = 10.*np.ones(nits)
min_progress = 1.2
xlist = []
#Parameters for W
#nsubSrc = 5
#InnerIt = 1
#dmisfitsub = []
#Initialize Random Source
#W = np.random.randn(survey.nSrc,nsubSrc)
#problem.unpair()
#problem.pair(survey)
#Q = problem.getRHS()
#sub = problem.getRHS().dot(W)
#rx_r = SimultaneousRx(locs=P)
#srcList_r = []
#for isrc in range(sub.shape[1]):
# src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
# srcList_r.append(src_r)
#survey_r = DC.Survey(srcList_r)
#problem.unpair()
#problem.pair(survey_r)
d = survey.dpred(mtrue)
survey.dobs = d
survey.std = np.ones_like(d)*0.05
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
dmisfitall = []
dmisfitall.append(dmis.eval(mwav)/survey.nD)
timespgl1 = []
print "end iteration: ",it, '; Overall Normalized Misfit: ', dmis.eval(mwav)/survey.nD
while (dmis.eval(mwav)/survey.nD)>0.5 and it<nits:
def JS(x,mode):
if mode == 1:
return problem.Jvec(mwav,iwavmap*x)
else:
return wavmap*problem.Jtvec(mwav,x)
b = survey.dpred(mwav)-survey.dpred(mtrue)
print '# of data: ',b.shape
opts = spgl1.spgSetParms({'iterations':100, 'verbosity':2})
sigtol = np.linalg.norm(b)/np.maximum(ratio[it],min_progress)
#tautol = 20000.
tic = time.clock()
x,resid,grad,info = spgl1.spg_bpdn(JS, b, sigma = sigtol,options=opts)
toc = time.clock()
print 'SPGL1 chronometer: ',toc - tic
timespgl1.append(toc-tic)
#x,resid,grad,info = spgl1.spg_lasso(JS,b,tautol,opts)
#assert dmis.eval(mwav) > dmis.eval(mwav - iwavmap*x)
mwav = mwav - iwavmap*x
it +=1
print "end iteration: ",it, '; Normalized Misfit: ', dmis.eval(mwav)/survey.nD
dmisfitall.append(dmis.eval(mwav)/survey.nD)
xlist.append(x)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./mfinal.npy',mwav)
np.savez('./xlist.npz',xlist)
np.save('./time.npz',timespgl1)
mm = mesh.plotImage(mwav)
plt.colorbar(mm[0])
plt.gca().set_xlim([-10.,10.])
plt.gca().set_ylim([-10.,0.])
plt.plot(cylinderPoints0[:,0],cylinderPoints0[:,1], linestyle = 'dashed', color='k')
plt.plot(cylinderPoints1[:,0],cylinderPoints1[:,1], linestyle = 'dashed', color='k')
plt.show() | mit |
blaze/dask | dask/dataframe/utils.py | 1 | 32073 | import math
import numbers
import re
import textwrap
from collections.abc import Iterator, Mapping
import sys
import traceback
from contextlib import contextmanager
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_scalar,
is_sparse,
is_period_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
)
# include these here for compat
from ._compat import ( # noqa: F401
PANDAS_VERSION,
PANDAS_GT_0240,
PANDAS_GT_0250,
PANDAS_GT_100,
PANDAS_GT_110,
HAS_INT_NA,
tm,
)
from .extensions import make_array_nonempty, make_scalar
from ..base import is_dask_collection
from ..core import get_deps
from ..local import get_sync
from ..utils import asciitable, is_arraylike, Dispatch, typename
from ..utils import is_dataframe_like as dask_is_dataframe_like
from ..utils import is_series_like as dask_is_series_like
from ..utils import is_index_like as dask_is_index_like
# register pandas extension types
from . import _dtypes # noqa: F401
from . import methods
def is_integer_na_dtype(t):
dtype = getattr(t, "dtype", t)
if HAS_INT_NA:
types = (
pd.Int8Dtype,
pd.Int16Dtype,
pd.Int32Dtype,
pd.Int64Dtype,
pd.UInt8Dtype,
pd.UInt16Dtype,
pd.UInt32Dtype,
pd.UInt64Dtype,
)
else:
types = ()
return isinstance(dtype, types)
def shard_df_on_index(df, divisions):
"""Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[: indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i] : indices[i + 1]]
yield df.iloc[indices[-1] :]
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided (note that the order of
the names should match the order of the columns). Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
indent = " " * kwargs.get("pad", 8)
body = textwrap.wrap(
_META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78
)
descr = "{0}\n{1}".format(_META_TYPES, "\n".join(body))
if f.__doc__:
if "$META" in f.__doc__:
f.__doc__ = f.__doc__.replace("$META", descr)
else:
# Put it at the end of the parameters section
parameter_header = "Parameters\n%s----------" % indent[4:]
first, last = re.split("Parameters\\n[ ]*----------", f.__doc__)
parameters, rest = last.split("\n\n", 1)
f.__doc__ = "{0}{1}{2}\n{3}{4}\n\n{5}".format(
first, parameter_header, parameters, indent[4:], descr, rest
)
return f
@contextmanager
def raise_on_meta_error(funcname=None, udf=False):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = "".join(traceback.format_tb(exc_traceback))
msg = "Metadata inference failed{0}.\n\n"
if udf:
msg += (
"You have supplied a custom function and Dask is unable to \n"
"determine the type of output that that function returns. \n\n"
"To resolve this please provide a meta= keyword.\n"
"The docstring of the Dask function you ran should have more information.\n\n"
)
msg += (
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
)
msg = msg.format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb)
raise ValueError(msg) from e
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def has_known_categories(x):
"""Returns whether the categories in `x` are known.
Parameters
----------
x : Series or CategoricalIndex
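Examples
--------
A small illustration with a plain pandas Series (a dask Series is reduced to
its ``_meta`` first):
>>> import pandas as pd
>>> s = pd.Series(["a", "b"], dtype="category")
>>> has_known_categories(s)
True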
"""
x = getattr(x, "_meta", x)
if is_series_like(x):
return UNKNOWN_CATEGORIES not in x.cat.categories
elif is_index_like(x) and hasattr(x, "categories"):
return UNKNOWN_CATEGORIES not in x.categories
raise TypeError("Expected Series or CategoricalIndex")
def strip_unknown_categories(x, just_drop_unknown=False):
"""Replace any unknown categoricals with empty categoricals.
Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
cat_mask = x.dtypes == "category"
if cat_mask.any():
cats = cat_mask[cat_mask].index
for c in cats:
if not has_known_categories(x[c]):
if just_drop_unknown:
x[c].cat.remove_categories(UNKNOWN_CATEGORIES, inplace=True)
else:
x[c].cat.set_categories([], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype) and not has_known_categories(x):
x.cat.set_categories([], inplace=True)
if isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(
x.index
):
x.index = x.index.set_categories([])
elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
x = x.set_categories([])
return x
def clear_known_categories(x, cols=None, index=True):
"""Set categories to be unknown.
Parameters
----------
x : DataFrame, Series, Index
cols : iterable, optional
If x is a DataFrame, set only categoricals in these columns to unknown.
        By default, all categorical columns are set to unknown.
    index : bool, optional
        If True and x is a Series or DataFrame, also clear the known
        categories in the index.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = x.dtypes == "category"
if cols is None:
cols = mask[mask].index
elif not mask.loc[cols].all():
raise ValueError("Not all columns are categoricals")
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if index and isinstance(x.index, pd.CategoricalIndex):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
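# Round-trip sketch tying the two helpers above together (illustrative only):
# clearing marks the categories as unknown, stripping replaces the sentinel
# with an empty but known set of categories.
#
#   >>> s = pd.Series(["a", "b"], dtype="category")
#   >>> unknown = clear_known_categories(s)                       # doctest: +SKIP
#   >>> has_known_categories(unknown)                             # doctest: +SKIP
#   False
#   >>> has_known_categories(strip_unknown_categories(unknown))   # doctest: +SKIP
#   True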
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
make_meta = Dispatch("make_meta")
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
@make_meta.register(object)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8')) # doctest: +SKIP
Series([], Name: a, dtype: float64)
>>> make_meta('i8') # doctest: +SKIP
1
"""
if hasattr(x, "_meta"):
return x._meta
elif is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
meta_nonempty = Dispatch("meta_nonempty")
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
if PANDAS_GT_100:
res.attrs = x.attrs
return res
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
hash_object_dispatch = Dispatch("hash_object_dispatch")
@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def hash_object_pandas(
obj, index=True, encoding="utf8", hash_key=None, categorize=True
):
return pd.util.hash_pandas_object(
obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize
)
group_split_dispatch = Dispatch("group_split_dispatch")
@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def group_split_pandas(df, c, k, ignore_index=False):
indexer, locations = pd._libs.algos.groupsort_indexer(
c.astype(np.int64, copy=False), k
)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [
df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b]
for a, b in zip(locations[:-1], locations[1:])
]
return dict(zip(range(k), parts))
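# Illustrative sketch of the shuffle helper above: ``c`` assigns every row a
# group id in ``range(k)`` and the result maps each id to its slice of the
# frame. The data below is made up.
#
#   >>> df = pd.DataFrame({"x": [10, 20, 30, 40]})
#   >>> parts = group_split_pandas(df, np.array([0, 1, 0, 1]), 2)  # doctest: +SKIP
#   >>> sorted(parts)                                              # doctest: +SKIP
#   [0, 1]
#   >>> parts[0]["x"].tolist()                                     # doctest: +SKIP
#   [10, 30]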
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
def _scalar_from_dtype(dtype):
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
def _nonempty_scalar(x):
if type(x) in make_scalar._lookup:
return make_scalar(x)
if np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return make_scalar(dtype)
raise TypeError("Can't handle meta of type '{0}'".format(typename(type(x))))
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = s.cat.categories[:0]
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
elif is_sparse(dtype):
        # sparse extension dtype: build a scalar of the underlying subtype
        entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_100:
data = pd.array([entry, entry], dtype=dtype)
else:
data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_0240:
data = pd.array([entry, entry], dtype=dtype)
else:
data = np.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
out = pd.Series(data, name=s.name, index=idx)
if PANDAS_GT_100:
out.attrs = s.attrs
return out
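# Sketch of the ``meta_nonempty`` dispatch in action (illustrative values): a
# zero-row meta frame is expanded into two rows of dtype-appropriate dummy data.
#
#   >>> meta = make_meta([("a", "i8"), ("b", "O")])   # doctest: +SKIP
#   >>> meta_nonempty(meta).dtypes                    # doctest: +SKIP
#   a     int64
#   b    object
#   dtype: object
#   >>> len(meta_nonempty(meta))                      # doctest: +SKIP
#   2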
def is_dataframe_like(df):
return dask_is_dataframe_like(df)
def is_series_like(s):
return dask_is_series_like(s)
def is_index_like(s):
return dask_is_index_like(s)
def check_meta(x, meta, funcname=None, numeric_equal=True):
"""Check that the dask metadata matches the result.
If metadata matches, ``x`` is passed through unchanged. A nice error is
raised if metadata doesn't match.
Parameters
----------
x : DataFrame, Series, or Index
meta : DataFrame, Series, or Index
The expected metadata that ``x`` should match
funcname : str, optional
The name of the function in which the metadata was specified. If
provided, the function name will be included in the error message to be
more helpful to users.
    numeric_equal : bool, optional
        If True, integer and floating dtypes compare equal. This is useful due
        to pandas' implicit conversion of integer to floating upon encountering
missingness, which is hard to infer statically.
"""
eq_types = {"i", "f", "u"} if numeric_equal else set()
def equal_dtypes(a, b):
if is_categorical_dtype(a) != is_categorical_dtype(b):
return False
if isinstance(a, str) and a == "-" or isinstance(b, str) and b == "-":
return False
if is_categorical_dtype(a) and is_categorical_dtype(b):
if UNKNOWN_CATEGORIES in a.categories or UNKNOWN_CATEGORIES in b.categories:
return True
return a == b
return (a.kind in eq_types and b.kind in eq_types) or (a == b)
if not (
is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)
) or is_dask_collection(meta):
raise TypeError(
"Expected partition to be DataFrame, Series, or "
"Index, got `%s`" % typename(type(meta))
)
if type(x) != type(meta):
errmsg = "Expected partition of type `%s` but got `%s`" % (
typename(type(meta)),
typename(type(x)),
)
elif is_dataframe_like(meta):
dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1, sort=True)
bad_dtypes = [
(repr(col), a, b)
for col, a, b in dtypes.fillna("-").itertuples()
if not equal_dtypes(a, b)
]
if bad_dtypes:
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["Column", "Found", "Expected"], bad_dtypes),
)
else:
check_matching_columns(meta, x)
return x
else:
if equal_dtypes(x.dtype, meta.dtype):
return x
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["", "dtype"], [("Found", x.dtype), ("Expected", meta.dtype)]),
)
raise ValueError(
"Metadata mismatch found%s.\n\n"
"%s" % ((" in `%s`" % funcname if funcname else ""), errmsg)
)
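# Usage sketch (made-up values): validate that a computed partition has the
# dtypes promised by its metadata; a mismatch raises the readable error above.
#
#   >>> meta = make_meta({"a": "i8"})                        # doctest: +SKIP
#   >>> part = pd.DataFrame({"a": [1, 2]})
#   >>> check_meta(part, meta, funcname="my_func") is part   # doctest: +SKIP
#   True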
def check_matching_columns(meta, actual):
# Need nan_to_num otherwise nan comparison gives False
if not np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(actual.columns)):
extra = methods.tolist(actual.columns.difference(meta.columns))
missing = methods.tolist(meta.columns.difference(actual.columns))
if extra or missing:
extra_info = f" Extra: {extra}\n Missing: {missing}"
else:
extra_info = "Order of columns does not match"
raise ValueError(
"The columns in the computed data do not match"
" the columns in the provided metadata\n"
f"{extra_info}"
)
def index_summary(idx, name=None):
"""Summarized representation of an Index."""
n = len(idx)
if name is None:
name = idx.__class__.__name__
if n:
head = idx[0]
tail = idx[-1]
summary = ", {} to {}".format(head, tail)
else:
summary = ""
return "{}: {} entries{}".format(name, n, summary)
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True, result=None):
import dask.dataframe as dd
if hasattr(dsk, "__dask_graph__"):
graph = dsk.__dask_graph__()
if hasattr(graph, "validate"):
graph.validate()
if result is None:
result = dsk.compute(scheduler="sync")
if isinstance(dsk, dd.Index):
assert "Index" in type(result).__name__, type(result)
# assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name
assert dsk._meta.name == result.name
if isinstance(result, pd.MultiIndex):
assert result.names == dsk._meta.names
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert "Series" in type(result).__name__, type(result)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.DataFrame):
assert "DataFrame" in type(result).__name__, type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.core.Scalar):
assert np.isscalar(result) or isinstance(
result, (pd.Timestamp, pd.Timedelta)
)
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = "Unsupported dask instance {0} found".format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if is_dataframe_like(a):
if set(a.index.names) & set(a.columns):
a.index.names = [
"-overlapped-index-name-%d" % i for i in range(len(a.index.names))
]
a = a.sort_values(by=methods.tolist(a.columns))
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(
a,
b,
check_names=True,
check_dtypes=True,
check_divisions=True,
check_index=True,
**kwargs,
):
if check_divisions:
assert_divisions(a)
assert_divisions(b)
if hasattr(a, "divisions") and hasattr(b, "divisions"):
at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python
bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion
assert at == bt, (at, bt)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if hasattr(a, "to_pandas"):
a = a.to_pandas()
if hasattr(b, "to_pandas"):
b = b.to_pandas()
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
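# Test-helper sketch (illustrative): compare a dask collection against the
# equivalent pandas object, checking names, dtypes and divisions along the way.
#
#   >>> import dask.dataframe as dd                          # doctest: +SKIP
#   >>> pdf = pd.DataFrame({"a": [1, 2, 3]})
#   >>> assert_eq(dd.from_pandas(pdf, npartitions=2), pdf)   # doctest: +SKIP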
def assert_dask_graph(dask, label):
if hasattr(dask, "dask"):
dask = dask.dask
assert isinstance(dask, Mapping)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
raise AssertionError(
"given dask graph doesn't contain label: {label}".format(label=label)
)
def assert_divisions(ddf):
if not hasattr(ddf, "divisions"):
return
if not getattr(ddf, "known_divisions", False):
return
def index(x):
if is_index_like(x):
return x
try:
return x.index.get_level_values(0)
except AttributeError:
return x.index
results = get_sync(ddf.dask, ddf.__dask_keys__())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, "dask"):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert " " not in k
assert k.split("-")[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_type_sets = [{"O", "S", "U", "a"}] # treat object and strings alike
if numeric_equal:
eq_type_sets.append({"i", "f", "u"})
def eq_dtypes(a, b):
return any(
a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets
) or (a == b)
if not is_dask_collection(res) and is_dataframe_like(res):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():
assert eq_dtypes(a, b)
elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):
a = ddf._meta.dtype
b = res.dtype
assert eq_dtypes(a, b)
else:
if hasattr(ddf._meta, "dtype"):
a = ddf._meta.dtype
if not hasattr(res, "dtype"):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert eq_dtypes(a, b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
def valid_divisions(divisions):
"""Are the provided divisions valid?
Examples
--------
>>> valid_divisions([1, 2, 3])
True
>>> valid_divisions([3, 2, 1])
False
>>> valid_divisions([1, 1, 1])
False
>>> valid_divisions([0, 1, 1])
True
>>> valid_divisions(123)
False
>>> valid_divisions([0, float('nan'), 1])
False
"""
if not isinstance(divisions, (tuple, list)):
return False
for i, x in enumerate(divisions[:-2]):
if x >= divisions[i + 1]:
return False
if isinstance(x, numbers.Number) and math.isnan(x):
return False
for x in divisions[-2:]:
if isinstance(x, numbers.Number) and math.isnan(x):
return False
if divisions[-2] > divisions[-1]:
return False
return True
def drop_by_shallow_copy(df, columns, errors="raise"):
"""Use shallow copy to drop columns in place"""
df2 = df.copy(deep=False)
if not pd.api.types.is_list_like(columns):
columns = [columns]
df2.drop(columns=columns, inplace=True, errors=errors)
return df2
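# Minimal sketch of the helper above (made-up data): only the shallow copy is
# mutated, so the original frame keeps its columns.
#
#   >>> df = pd.DataFrame({"a": [1], "b": [2]})
#   >>> drop_by_shallow_copy(df, "a").columns.tolist()   # doctest: +SKIP
#   ['b']
#   >>> df.columns.tolist()                              # doctest: +SKIP
#   ['a', 'b']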
| bsd-3-clause |
andres-de-castro/kineSYS | ssvep_trainer.py | 1 | 6516 | #!/usr/bin/env python
import ctypes
import datetime
import os
import speech
import time
import subprocess
import sys
import pandas as pd
from ctypes import *
from numpy import *
from ctypes.util import find_library
# print (ctypes.util.find_library('edk.dll'))
# print (os.path.exists('edk.dll'))
libEDK = cdll.LoadLibrary("edk.dll")
header = ['COUNTER','AF3','F7','F3','FC5','T7','P7','O1','O2','P8','T8','FC6','F4','F8','AF4','GYROX','GYROY','TIMESTAMP','FUNC_ID','FUNC_VALUE','MARKER','SYNC_SIGNAL','USER','CLASS']
# subprocess.Popen(["python", "ssvep_flash.py"])
if len(sys.argv) == 1:
print "Please enter the name of the user being tested on"
user_name = str(raw_input()).lower()
print "Please enter the session time"
session_time = float(raw_input())
print "Please enter the number of sessions"
number_of_sessions = int(raw_input())
print "Please enter type of class"
class_type = str(raw_input())
else:
user_name = sys.argv[1]
session_time = float(sys.argv[2])
number_of_sessions = int(sys.argv[3])
class_type = str(sys.argv[4])
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d--%H-%M')
def record_data(session_time=8.0, target=None):
session_log = pd.DataFrame()
hData = libEDK.EE_DataCreate()
libEDK.EE_DataSetBufferSizeInSec(secs)
start_time = time.time()
# print ("Buffer size in secs:")
while time.time() < start_time + session_time + 1:
if state == 0:
eventType = libEDK.EE_EmoEngineEventGetType(eEvent)
libEDK.EE_EmoEngineEventGetUserId(eEvent, user)
#libEDK.EE_Event_enum.EE_UserAdded:
if eventType == 16:
libEDK.EE_DataAcquisitionEnable(userID, True)
readytocollect = True
if readytocollect is True:
libEDK.EE_DataUpdateHandle(0, hData)
libEDK.EE_DataGetNumberOfSample(hData, nSamplesTaken)
print ("Wrote ", nSamplesTaken[0])
if nSamplesTaken[0] != 0:
nSam = nSamplesTaken[0]
arr = (ctypes.c_double * nSamplesTaken[0])()
ctypes.cast(arr, ctypes.POINTER(ctypes.c_double))
#libEDK.EE_DataGet(hData, 3,byref(arr), nSam)
# data = array('d')#zeros(nSamplesTaken[0],double)
for sampleIdx in range(nSamplesTaken[0]):
row = []
for i in range(22):
libEDK.EE_DataGet(hData,targetChannelList[i], byref(arr), nSam)
# print >>f,arr[sampleIdx],",",
row.append(str(arr[sampleIdx]))
row = ','.join(row) + ',' + user_name + ',' + str(target)
row = row.split(',')
session_log = session_log.append([row])
time.sleep(0.2)
libEDK.EE_DataFree(hData)
session_log.columns = header
return session_log[:((int(session_time) - 1)*128)]
ED_COUNTER = 0
ED_INTERPOLATED = 1
ED_RAW_CQ = 2
ED_AF3 = 3
ED_F7 = 4
ED_F3 = 5
ED_FC5 = 6
ED_T7 = 7
ED_P7 = 8
ED_O1 = 9
ED_O2 = 10
ED_P8 = 11
ED_T8 = 12
ED_FC6 = 13
ED_F4 = 14
ED_F8 = 15
ED_AF4 = 16
ED_GYROX = 17
ED_GYROY = 18
ED_TIMESTAMP = 19
ED_ES_TIMESTAMP = 20
ED_FUNC_ID = 21
ED_FUNC_VALUE = 22
ED_MARKER = 23
ED_SYNC_SIGNAL = 24
# IN DLL(edk.dll)
# typedef enum EE_DataChannels_enum {
# ED_COUNTER = 0, ED_INTERPOLATED, ED_RAW_CQ,
# ED_AF3, ED_F7, ED_F3, ED_FC5, ED_T7,
# ED_P7, ED_O1, ED_O2, ED_P8, ED_T8,
# ED_FC6, ED_F4, ED_F8, ED_AF4, ED_GYROX,
# ED_GYROY, ED_TIMESTAMP, ED_ES_TIMESTAMP, ED_FUNC_ID, ED_FUNC_VALUE, ED_MARKER,
# ED_SYNC_SIGNAL
# } EE_DataChannel_t;
targetChannelList = [ED_COUNTER, ED_AF3, ED_F7, ED_F3, ED_FC5, ED_T7,ED_P7, ED_O1, ED_O2, ED_P8, ED_T8,ED_FC6, ED_F4, ED_F8, ED_AF4, ED_GYROX, ED_GYROY, ED_TIMESTAMP, ED_FUNC_ID, ED_FUNC_VALUE, ED_MARKER, ED_SYNC_SIGNAL]
write = sys.stdout.write
eEvent = libEDK.EE_EmoEngineEventCreate()
eState = libEDK.EE_EmoStateCreate()
userID = c_uint(0)
nSamples = c_uint(0)
nSam = c_uint(0)
nSamplesTaken = pointer(nSamples)
da = zeros(128, double)
data = pointer(c_double(0))
user = pointer(userID)
composerPort = c_uint(1726)
secs = c_float(1)
datarate = c_uint(0)
readytocollect = False
option = c_int(0)
state = c_int(0)
classes_list = {
0:'Left',
1:'Up',
2:'Right',
3:'Down'
}
connect_status = libEDK.EE_EngineConnect("Emotiv Systems-5")
print (connect_status)
if connect_status != 0:
print "Emotiv Engine start up failed."
else:
print "Emotiv Engine startup has completed successfully.\n\n"
master_session = pd.DataFrame(columns=header)
print "Press any key to being testing"
_ = raw_input()
state = libEDK.EE_EngineGetNextEvent(eEvent)
for _ in range(number_of_sessions):
for target in classes_list:
print "next class is {0}".format(classes_list[target])
speech.say("next class is {0}".format(classes_list[target]))
print "Press any key to continue"
str(raw_input()).lower()
for i in range(3):
        # speak "Get ready!" at the start of the countdown
        if i == 0:
speech.say('Get ready!')
print '\n\nGet ready!\n\n'
time.sleep(0.5)
print i + 1
speech.say(str(i + 1))
session_log = record_data(
session_time=session_time,
target=target,
)
print "\nDo you want to save the current session?"
save_session_response = str(raw_input()).lower()
while save_session_response != 'y':
for i in range(3):
            # speak "Get ready!" at the start of the countdown
            if i == 0:
speech.say('Get ready!')
print '\n\nGet ready!\n\n'
time.sleep(0.5)
print i + 1
speech.say(str(i + 1))
session_log = record_data(
session_time=session_time,
target=target,
)
print "Do you want to save the current session? Hit \'y\' to save session.\n"
save_session_response = str(raw_input()).lower()
master_session = pd.concat([master_session, session_log])
libEDK.EE_EngineDisconnect()
libEDK.EE_EmoStateFree(eState)
libEDK.EE_EmoEngineEventFree(eEvent)
print master_session
master_session.to_csv('./logs/' + timestamp + '_' + user_name + '_' + 'ssvep' + '_' + class_type + '.csv', mode='a', index=None)
print '\nFile has been saved to:' + '/logs/' + timestamp + '_' + user_name + '_' + 'ssvep' + '_' + class_type + '.csv'
| apache-2.0 |
RayMick/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/applications/plot_model_complexity_influence.py | 25 | 6378 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2**-15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axis.py | 69 | 54453 | """
Classes for the ticks and x and y axis
"""
from __future__ import division
from matplotlib import rcParams
import matplotlib.artist as artist
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size = None, # points
gridOn = None, # defaults to axes.grid
tick1On = True,
tick2On = True,
label1On = True,
label2On = False,
major = True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
        size is the tick size in points
"""
artist.Artist.__init__(self)
if gridOn is None: gridOn = rcParams['axes.grid']
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
if size is None:
if major:
size = rcParams['%s.major.size'%name]
pad = rcParams['%s.major.pad'%name]
else:
size = rcParams['%s.minor.size'%name]
pad = rcParams['%s.minor.pad'%name]
self._tickdir = rcParams['%s.direction'%name]
if self._tickdir == 'in':
self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
self._pad = pad
else:
self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = pad + size
self._loc = loc
self._size = size
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def get_children(self):
children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
#self.tick1line.set_clip_path(clippath, transform)
#self.tick2line.set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._pad / 72.0
def contains(self, mouseevent):
"""
Test whether the mouse event occured in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if callable(self._contains): return self._contains(self,mouseevent)
return False,{}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._pad = val
def get_pad(self):
'Get the value of the tick label pad in points'
return self._pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__name__)
midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())
if midPoint:
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
def _set_artist_props(self, a):
a.set_figure(self.figure)
#if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
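# Illustrative sketch (not part of the original module): the per-tick switches
# documented on ``Tick`` are normally reached through an Axis instance, e.g. to
# mirror tick marks and labels on the opposite side of a plot. ``ax`` is a
# hypothetical matplotlib Axes.
#
#   >>> for tick in ax.xaxis.get_major_ticks():   # doctest: +SKIP
#   ...     tick.tick2On = True    # draw the top tick marks as well
#   ...     tick.label2On = True   # and their labels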
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)
size = rcParams['xtick.labelsize']
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=size),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[0],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D( xdata=(0,), ydata=(1,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[1],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
# how far from the y axis line the right of the ticklabel are
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
#t.set_transform( self.axes.transData )
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (0,), (0,), color='k',
marker = self._ytickmarkers[0],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (1,), (0,), color='k',
marker = self._ytickmarkers[1],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y( y )
if self.label2On:
self.label2.set_y( y )
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
class Ticker:
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`transData` - transform data coords to display coords
* :attr:`transAxis` - transform axis coords to display coords
"""
LABELPAD = 5
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)"%tuple(self.axes.transAxes.transform_point((0,0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
#class dummy:
# locator = None
# formatter = None
#self.major = dummy()
#self.minor = dummy()
self._autolabelpos = True
self.label = self._get_label()
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
self.cla()
self.set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
        coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system will be
the axes coordinate system (0,0) is (left,bottom), (0.5, 0.5)
is middle, etc
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
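    # Usage sketch (illustrative values): pin the y-axis label at a fixed
    # axes-fraction position so the labels of several subplots line up.
    #
    #   >>> ax.yaxis.set_label_coords(-0.1, 0.5)   # doctest: +SKIP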
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
# whether the grids are on
self._gridOnMajor = rcParams['axes.grid']
self._gridOnMinor = False
self.label.set_text('')
self._set_artist_props(self.label)
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
self.converter = None
self.units = None
self.set_units(None)
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'Set the axis data limits'
raise NotImplementedError('Derived must override')
def _set_artist_props(self, a):
if a is None: return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
ticklabelBoxes = []
ticklabelBoxes2 = []
if not self.get_visible(): return
renderer.open_group(__name__)
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
# scale up the axis label box to also find the neighbors, not
        # just the tick labels that actually overlap.  Note we need a
        # *copy* of the axis label box because we don't want to scale
# the actual bbox
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1+labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1+labels2)
def get_ticklabels(self, minor=False):
'Return a list of Text instances for ticklabels'
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
        'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None: return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; b is a boolean use *which* =
'major' | 'minor' to set the grid for major or minor ticks
if *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True
*kwargs* are used to set the line properties of the grids, eg,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs): b = True
if which.lower().find('minor')>=0:
if b is None: self._gridOnMinor = not self._gridOnMinor
else: self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMinor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
else:
if b is None: self._gridOnMajor = not self._gridOnMajor
else: self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMajor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
def update_units(self, data):
"""
introspect *data* for units converter and update the
        axis.converter instance if necessary. Return *True* if *data* is
registered for unit conversion
"""
converter = munits.registry.get_converter(data)
if converter is None: return False
self.converter = converter
default = self.converter.default_units(data)
#print 'update units: default="%s", units=%s"'%(default, self.units)
if default is not None and self.units is None:
self.set_units(default)
self._update_axisinfo()
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units)
if info is None:
return
if info.majloc is not None and self.major.locator!=info.majloc:
self.set_major_locator(info.majloc)
if info.minloc is not None and self.minor.locator!=info.minloc:
self.set_minor_locator(info.minloc)
if info.majfmt is not None and self.major.formatter!=info.majfmt:
self.set_major_formatter(info.majfmt)
if info.minfmt is not None and self.minor.formatter!=info.minfmt:
self.set_minor_formatter(info.minfmt)
if info.label is not None:
label = self.get_label()
label.set_text(info.label)
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
#print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)
return x
ret = self.converter.convert(x, self.units)
#print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)
return ret
def set_units(self, u):
"""
        set the units for the axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u!=self.units:
self.units = u
#print 'setting units', self.converter, u, munits.registry.get_converter(u)
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
def get_units(self):
'return the units for axis'
return self.units
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.major.formatter = formatter
formatter.set_axis(self)
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.minor.formatter = formatter
formatter.set_axis(self)
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.major.locator = locator
locator.set_axis(self)
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.minor.locator = locator
locator.set_axis(self)
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
ACCEPTS: sequence of strings
"""
#ticklabels = [str(l) for l in ticklabels]
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter( mticker.FixedFormatter(ticklabels) )
            ticks = self.get_major_ticks()
ret = []
for i, tick in enumerate(ticks):
if i<len(ticklabels):
tick.label1.set_text(ticklabels[i])
ret.append(tick.label1)
tick.label1.update(kwargs)
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
### XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator( mticker.FixedLocator(ticks) )
return self.get_major_ticks(len(ticks))
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset text position based on the sequence of bounding
        boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self,mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = xaxes>=0 and xaxes<=1 and (
(y<b and y>b-self.pickradius) or
(y>t and y<t+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return XTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center',
)
label.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(label)
self.label_position='bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color = rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right',
)
offsetText.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(offsetText)
self.offset_text_position='bottom'
return offsetText
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
assert position == 'top' or position == 'bottom'
if position == 'top':
self.label.set_verticalalignment('bottom')
else:
self.label.set_verticalalignment('top')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / 72.0))
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / 72.0))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
        Set the ticks position (top, bottom, both, default or none).
        'both' sets the ticks to appear on both positions, but does not
        change the tick labels. 'default' resets the tick positions to
        the default: ticks on both positions, labels at bottom. 'none'
        can be used if you don't want any ticks.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
assert position in ('top', 'bottom', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'top':
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'bottom':
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
for t in ticks:
t.tick1On = True
t.tick2On = True
for t in ticks:
t.update_position(t._loc)
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorTop and minorTop: return 'top'
MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if MajorBottom and MinorBottom: return 'bottom'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
        'set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = yaxes>=0 and yaxes<=1 and (
(x<l and x>l-self.pickradius) or
(x>r and x<r+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return YTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='center',
horizontalalignment='right',
rotation='vertical',
)
label.set_transform( mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes) )
self._set_artist_props(label)
self.label_position='left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties = font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color = rcParams['ytick.color'],
verticalalignment = 'bottom',
horizontalalignment = 'left',
)
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()) )
self._set_artist_props(offsetText)
self.offset_text_position='left'
return offsetText
def get_label_position(self):
"""
Return the label position (left or right)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
assert position == 'left' or position == 'right'
if position == 'right':
self.label.set_horizontalalignment('left')
else:
self.label.set_horizontalalignment('right')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'left':
if not len(bboxes):
left = self.axes.bbox.xmin
else:
bbox = mtransforms.Bbox.union(bboxes)
left = bbox.x0
self.label.set_position( (left-self.LABELPAD*self.figure.dpi/72.0, y))
else:
if not len(bboxes2):
right = self.axes.bbox.xmax
else:
bbox = mtransforms.Bbox.union(bboxes2)
right = bbox.x1
self.label.set_position( (right+self.LABELPAD*self.figure.dpi/72.0, y))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def set_offset_position(self, position):
assert position == 'left' or position == 'right'
x,y = self.offsetText.get_position()
if position == 'left': x = 0
else: x = 1
self.offsetText.set_ha(position)
self.offsetText.set_position((x,y))
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
        Set the ticks position (left, right, both, default or none).
        'both' sets the ticks to appear on both positions, but
        does not change the tick labels.
        'default' resets the tick positions to the default:
        ticks on both positions, labels on the left.
        'none' can be used if you don't want any ticks.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
assert position in ('left', 'right', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'right':
self.set_offset_position('right')
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'left':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorRight and minorRight: return 'right'
majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if majorLeft and minorLeft: return 'left'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
        'set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
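# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It exercises the tick/label API defined above -- set_ticks,
# set_ticklabels, grid and tick_left -- assuming a standard pyplot
# Figure/Axes whose ``ax.xaxis``/``ax.yaxis`` are the XAxis/YAxis instances
# implemented in this file.
def _example_tick_api_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(range(5), [v * v for v in range(5)])
    ax.xaxis.set_ticks([0, 2, 4])                     # backed by a FixedLocator
    ax.xaxis.set_ticklabels(['zero', 'two', 'four'])  # backed by a FixedFormatter
    ax.xaxis.grid(True, color='0.8', linestyle='--')  # per-axis grid control
    ax.yaxis.tick_left()                              # ticks/labels on the left only
    plt.show()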
| agpl-3.0 |
Garrett-R/scikit-learn | sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
warmspringwinds/scikit-image | doc/examples/plot_threshold_adaptive.py | 22 | 1307 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import threshold_otsu, threshold_adaptive
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
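# Hedged illustration (added; not part of the original example): the idea
# behind ``threshold_adaptive`` written out with a plain, unweighted local
# mean (the real function uses a weighted -- by default Gaussian -- mean).
# Assumes scipy is available; ``block_size`` and ``offset`` mirror the
# parameters used above.
def naive_adaptive_threshold(img, block_size, offset=10):
    from scipy.ndimage import uniform_filter
    local_mean = uniform_filter(img.astype(float), size=block_size)
    return img > (local_mean - offset)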
| bsd-3-clause |
yianni/strands_qsr_lib | qsr_lib/dbg/dbg_rcc8.py | 8 | 6604 | #!/usr/bin/python
# import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class Dbg(object):
def __init__(self):
pass
def return_bounding_box_2d(self, x, y, xsize, ysize):
"""Return the bounding box
:param x: x center
:param y: y center
:param xsize: x size
:param ysize: y size
:return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the coordinates of the diagonal points of the
bounding box depending on your coordinates frame
"""
if xsize <= 0 or ysize <= 0:
print("ERROR: can't compute bounding box, xsize or height has no positive value")
return []
return [x-xsize/2, y-ysize/2, x+xsize/2, y+ysize/2]
def compute_qsr(self, bb1, bb2):
"""Wrapper for __compute_qsr
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
        :return: an RCC depending on your implementation
"""
return self.__compute_qsr(bb1, bb2)
def __compute_qsr(self, bb1, bb2):
"""Return RCC8
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
:param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
:return: an RCC8 relation from the following:
{0:'dc'} x is disconnected from y
{1:'ec'} x is externally connected with y
{2:'po'} x partially overlaps y
{3:'eq'} x equals y
{4:'tpp'} x is a tangential proper part of y
            {5:'ntpp'} x is a non-tangential proper part of y
{6:'tppi'} y is a tangential proper part of x
{7:'ntppi'} y is a non-tangential proper part of x
+-------------+ +-------------+
|a | |a |
| | | |
| A | | B |
| | | |
| b| | b|
+-------------+ +-------------+
"""
# Object 1 Top Left X
ax = bb1[0]
# Object 1 Top Left Y
ay = bb1[1]
# Object 2 Top Left X
cx = bb2[0]
        # Object 2 Top Left Y
cy = bb2[1]
# Object 1 Bottom Right X
bx = bb1[2]
# Object 1 Bottom Right Y
by = bb1[3]
# Object 2 Bottom Right X
dx = bb2[2]
# Object 2 Bottom Right Y
dy = bb2[3]
# CALCULATE EQ
# Is object1 equal to object2
if(bb1 == bb2):
return "eq"
# Are objects disconnected?
# Cond1. If A's left edge is to the right of the B's right edge, - then A is Totally to right Of B
# Cond2. If A's right edge is to the left of the B's left edge, - then A is Totally to left Of B
# Cond3. If A's top edge is below B's bottom edge, - then A is Totally below B
# Cond4. If A's bottom edge is above B's top edge, - then A is Totally above B
# Cond1 Cond2 Cond3 Cond4
if (ax > dx) or (bx < cx) or (ay > dy) or (by < cy):
return "dc"
# Is one object inside the other
BinsideA = (ax <= cx) and (ay <= cy) and (bx >= dx) and (by >= dy)
AinsideB = (ax >= cx) and (ay >= cy) and (bx <= dx) and (by <= dy)
# Do objects share an X or Y (but are not necessarily touching)
sameX = (ax == cx or ax == dx or bx == cx or bx == dx)
sameY = (ay == cy or ay == dy or by == cy or by == dy)
if AinsideB and (sameX or sameY):
return "tpp"
if BinsideA and (sameX or sameY):
return "tppi"
if AinsideB:
return "ntpp"
if BinsideA:
return "ntppi"
# Are objects touching?
# Cond1. If A's left edge is equal to B's right edge, - then A is to the right of B and touching
        # Cond2. If A's right edge is equal to B's left edge, - then A is to the left of B and touching
# Cond3. If A's top edge equal to B's bottom edge, - then A is below B and touching
# Cond4. If A's bottom edge equal to B's top edge, - then A is above B and touching
# Cond1 Cond2 Cond3 Cond4
if (ax == dx) or (bx == cx) or (ay == dy) or (by == cy):
return "ec"
        # If none of the other conditions are met, the objects must be partially overlapping
return "po"
def plot_bbs(bb1, bb2):
plt.figure()
ax = plt.gca()
# ax.invert_yaxis()
ax.add_patch(Rectangle((bb1[0], bb1[1]), bb1[2]-bb1[0], bb1[3]-bb1[1], alpha=1, facecolor="blue"))
ax.annotate("o1", (bb1[0], bb1[1]), color='black', weight='bold', fontsize=14)
ax.add_patch(Rectangle((bb2[0], bb2[1]), bb2[2]-bb2[0], bb2[3]-bb2[1], alpha=1, facecolor="red"))
ax.annotate("o2", (bb2[0], bb2[1]), color='black', weight='bold', fontsize=14)
h = 6
l = 0
# ax.set_xlim(l, h)
# ax.set_ylim(l, h)
ax.set_xlim(l, h)
ax.set_ylim(h, l)
plt.show()
if __name__ == '__main__':
dbg = Dbg()
"""
{0:'dc'} x is disconnected from y
{1:'ec'} x is externally connected with y
{2:'po'} x partially overlaps y
{3:'eq'} x equals y
{4:'tpp'} x is a tangential proper part of y
    {5:'ntpp'} x is a non-tangential proper part of y
{6:'tppi'} y is a tangential proper part of x
{7:'ntppi'} y is a non-tangential proper part of x
+-------------+ +-------------+
|a | |a |
| | | |
| A | | B |
| | | |
| b| | b|
+-------------+ +-------------+
"""
# Play with these to test (x_center, y_center, xsize(i.e. x-size), ysize(i.e. y-size))
o1 = (3., 3., 2., 2.)
o2 = (3., 2., 1., 1.)
o1 = dbg.return_bounding_box_2d(o1[0], o1[1], o1[2], o1[3])
o2 = dbg.return_bounding_box_2d(o2[0], o2[1], o2[2], o2[3])
# Bounding boxes
# print("o1:", o1)
# print("o2:", o2)
# Relations
print("o1o2:", dbg.compute_qsr(o1, o2))
print("o2o1:", dbg.compute_qsr(o2, o1))
# Plot the boxes
plot_bbs(o1, o2)
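# Hedged illustration (added; not part of the original script): exercise a
# few of the RCC8 relations documented above on hand-made boxes.  Uses only
# the Dbg class and return_bounding_box_2d defined in this module; the box
# centres and sizes are arbitrary.
def _demo_rcc8_relations():
    dbg = Dbg()
    cases = {
        "dc":   ((1., 1., 1., 1.), (4., 4., 1., 1.)),  # far apart -> disconnected
        "ec":   ((1., 1., 2., 2.), (3., 1., 2., 2.)),  # share an edge -> externally connected
        "po":   ((2., 2., 2., 2.), (3., 2., 2., 2.)),  # partial overlap
        "ntpp": ((3., 3., 1., 1.), (3., 3., 4., 4.)),  # strictly inside -> non-tangential proper part
    }
    for expected, (c1, c2) in cases.items():
        bb1 = dbg.return_bounding_box_2d(*c1)
        bb2 = dbg.return_bounding_box_2d(*c2)
        print("expected %s, got %s" % (expected, dbg.compute_qsr(bb1, bb2)))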
| mit |
osvaldshpengler/BuildingMachineLearningSystemsWithPython | ch02/heldout.py | 24 | 1377 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
# Now we classify virginica vs non-virginica
is_virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
# Training is the negation of testing: i.e., datapoints not used for testing,
# will be used for training
training = ~testing
model = fit_model(features[training], is_virginica[training])
train_accuracy = accuracy(features[training], is_virginica[training], model)
test_accuracy = accuracy(features[testing], is_virginica[testing], model)
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
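# Hedged variant (added for illustration; not part of the original script):
# the same held-out evaluation with a random permutation split instead of
# the alternating True/False mask used above.  Relies only on numpy and the
# fit_model/accuracy helpers already imported from threshold.py; the split
# fraction and seed are arbitrary.
def heldout_with_random_split(features, labels, test_fraction=0.5, seed=0):
    rng = np.random.RandomState(seed)
    order = rng.permutation(len(features))
    n_test = int(len(features) * test_fraction)
    test_idx, train_idx = order[:n_test], order[n_test:]
    model = fit_model(features[train_idx], labels[train_idx])
    train_acc = accuracy(features[train_idx], labels[train_idx], model)
    test_acc = accuracy(features[test_idx], labels[test_idx], model)
    return train_acc, test_acc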
| mit |
linkmax91/bitquant | web/home/ipython/examples/bitcoinworksheet.py | 1 | 5516 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
from BitcoinAverager import TimeUtil, BitcoinAverager, PriceCompositor, Forex, BitcoinDataLoader
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start = hkg_time.localize(datetime.datetime(2014,2,1,6,0,0))
period = relativedelta(days=1)
intervals = 30
TimeUtil.time_table(start, period, intervals)
# <codecell>
averagers = {}
exchanges = ["anxhkHKD", "bitfinexUSD", "bitstampUSD", "btceUSD", "itbitEUR", "itbitSGD", "itbitUSD", \
"krakenEUR", "krakenUSD", "okcoinCNY", "btcnCNY"]
for e in exchanges:
averagers[e] = BitcoinAverager(e)
averager = averagers["bitfinexUSD"]
averager_base = BitcoinAverager("bitfinexUSD", "GBP")
# <codecell>
averager.index_range()
# <codecell>
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_time = hkg_time.localize(datetime(2014,2,1,6,0,0))
end_time = start_time + relativedelta(days=1)
start_epoch = TimeUtil.unix_epoch(start_time)
end_epoch = TimeUtil.unix_epoch(end_time)
selected = averager.select(start_epoch, end_epoch)
len(selected)
# <codecell>
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_time = hkg_time.localize(datetime(2014,2,1,6,0,0))
end_time = start_time + relativedelta(minute=15)
start_epoch = TimeUtil.unix_epoch(start_time)
end_epoch = TimeUtil.unix_epoch(end_time)
selected = averager.select(start_epoch, end_epoch)
selected
# <codecell>
selected = averager.intervals(start_time, relativedelta(minutes=1),50 )
selected
# <codecell>
selected_base = averager_base.intervals(start_time, relativedelta(minutes=1),50 )
selected_base
# <codecell>
%matplotlib inline
# <codecell>
selected.plot(y='price')
# <codecell>
selected.plot(y="volume")
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start = hkg_time.localize(datetime.datetime(2014,2,1,6,0,0))
period = relativedelta(days=1)
intervals = 30
forex_list = ["GBPUSD"]
forex_table = {}
compositor = PriceCompositor()
avg = compositor.exchange_table(start, period, intervals)
for f in forex_list:
forex = Forex(f)
forex_table[f] = forex.rates(list(map(TimeUtil.unix_epoch, avg.index)), avg.index)
forex_table[f].rename(columns={"rates" : f}, inplace=True)
avg = avg.join(forex_table[f] for f in forex_list)
# <codecell>
forex_table["GBPUSD"]
# <codecell>
f=Forex("USDUSD")
f.rates(map(TimeUtil.unix_epoch, avg.index), avg.index)
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_date = hkg_time.localize(datetime.datetime(2014,2,1,7,0,0))
period = relativedelta(minutes=5)
intervals = 200
forex_table = {}
compositor = PriceCompositor()
avg = compositor.exchange_table(start_date, period, intervals)
for f in forex_list:
forex = Forex(f)
forex_table[f] = forex.rates(list(map(TimeUtil.unix_epoch, avg.index)), avg.index)
forex_table[f].rename(columns={"rates" : f}, inplace=True)
avg = avg.join(forex_table[f] for f in forex_list)
avg[['bitfinexUSD_price', 'bitstampUSD_price']]
# <codecell>
avg[['GBPUSD']]
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_date = hkg_time.localize(datetime.datetime(2014,2,1,7,0,0))
period = relativedelta(hours=1)
intervals = 200
compositor = PriceCompositor()
compositor.currency_table(start_date, period, intervals)
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_date = hkg_time.localize(datetime.datetime(2014,2,1,7,0,0))
period = relativedelta(hours=1)
intervals = 200
compositor = PriceCompositor()
composite = compositor.composite_table(start_date, period, intervals)
composite
# <codecell>
composite[["price"]].plot()
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_date = hkg_time.localize(datetime.datetime(2014,2,1,7,0,0))
period = relativedelta(hours=1)
intervals = 200
compositor = PriceCompositor()
compositor.generate(start_date, period, intervals)
# <codecell>
c = compositor.generate(start_date, period, intervals, converted_prices=True,currency=True)
c
# <codecell>
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((8,1), (0,0), rowspan=7)
ax2 = plt.subplot2grid((8,1), (7,0))
ax1.xaxis.set_ticklabels([])
c[['price', 'GBPUSD_price', 'GBPEUR_price']].plot(ax=ax1)
c[['volume', 'USD_volume', 'EUR_volume']].plot(ax=ax2)
# <codecell>
import datetime
import time
import pytz
from dateutil.relativedelta import relativedelta
hkg_time = pytz.timezone("Asia/Hong_Kong")
start_date = hkg_time.localize(datetime.datetime(2014,2,1,7,0,0))
period = relativedelta(hours=1)
intervals = 200
compositor = PriceCompositor(base_currency="USD")
composite = compositor.generate(start_date, period, intervals)
# <codecell>
composite[["price"]].plot()
# <codecell>
compositor.reload()
# <codecell>
f = BitcoinDataLoader()
# <codecell>
f.filedata()
# <codecell>
| apache-2.0 |
yanlend/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
siutanwong/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
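# Hedged addition (not part of the original example): the geometric margin of
# a fitted hard-margin linear SVM, 2 / ||w||, i.e. the distance between the
# two dashed lines drawn above.  Pass the fitted classifier (e.g. ``clf``).
def margin_width(svc):
    w = svc.coef_[0]
    return 2.0 / np.sqrt(np.sum(w ** 2))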
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
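# Hedged addition (not part of the original example): inspect one recovered
# bicluster directly via the row/column indices exposed by the fitted model
# (BiclusterMixin.get_indices).  ``i`` selects which bicluster to look at,
# e.g. bicluster_shape(model, 0).
def bicluster_shape(fitted_model, i=0):
    row_idx, col_idx = fitted_model.get_indices(i)
    return len(row_idx), len(col_idx)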
| bsd-3-clause |
bthirion/scikit-learn | benchmarks/bench_isolation_forest.py | 17 | 4784 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
def print_outlier_ratio(y):
"""
    Helper function to show the distinct value counts of the target.
Useful indicator for the datasets used in bench_isolation_forest.py.
"""
uniq, cnt = np.unique(y, return_counts=True)
print("----- Target count values: ")
for u, c in zip(uniq, cnt):
print("------ %s -> %d occurences" % (str(u), c))
print("----- Outlier ratio: %.5f" % (np.min(cnt) / len(y)))
np.random.seed(1)
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
# Set this to true for plotting score histograms for each dataset:
with_decision_function_histograms = False
# Removed the shuttle dataset because as of 2017-03-23 mldata.org is down:
# datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['http', 'smtp', 'SA', 'SF', 'forestcover']
# Loop over all datasets for fitting and scoring the estimator:
for dat in datasets:
# Loading and vectorizing the data:
print('====== %s ======' % dat)
print('--- Fetching data...')
if dat in ['http', 'smtp', 'SF', 'SA']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
print('----- ')
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print_outlier_ratio(y)
print('--- Vectorizing data...')
if dat == 'SF':
lb = MultiLabelBinarizer()
x1 = lb.fit_transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b'normal.').astype(int)
print_outlier_ratio(y)
if dat == 'SA':
lb = MultiLabelBinarizer()
x1 = lb.fit_transform(X[:, 1])
x2 = lb.fit_transform(X[:, 2])
x3 = lb.fit_transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b'normal.').astype(int)
print_outlier_ratio(y)
if dat in ('http', 'smtp'):
y = (y != b'normal.').astype(int)
print_outlier_ratio(y)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('--- Fitting the IsolationForest estimator...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test) # the lower, the more abnormal
print("--- Preparing the plot elements...")
if with_decision_function_histograms:
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('Decision function for %s dataset' % dat)
ax[1].hist(scoring[y_test == 0], bins, color='b', label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r', label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
auc_score = auc(fpr, tpr)
label = ('%s (AUC: %0.3f, train_time= %0.2fs, '
'test_time= %0.2fs)' % (dat, auc_score, fit_time, predict_time))
# Print AUC score and train/test time:
print(label)
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
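# Hedged addition (not part of the original benchmark): besides the ROC
# curves above, IsolationForest can also emit hard labels through predict()
# (+1 for inliers, -1 for outliers).  This helper turns them into an error
# rate against the 0/1 ground truth used in the loop, e.g.
# hard_label_error_rate(model, X_test, y_test).
def hard_label_error_rate(fitted_model, X_eval, y_eval):
    y_pred = (fitted_model.predict(X_eval) == -1).astype(int)
    return np.mean(y_pred != y_eval)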
| bsd-3-clause |
agilefall/aws-tools | sqs_metrics.py | 1 | 3242 | #!/usr/bin/python
import argparse
import boto.sqs
import boto.ec2.cloudwatch
import os
import csv
from datetime import datetime, timedelta
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
import StringIO
class SqsMetrics:
default_metrics= ["ApproximateNumberOfMessagesVisible", "NumberOfMessagesDeleted", "NumberOfMessagesSent"]
def __init__(self, region):
assert region, "region must be supplied"
self.region = region
self.cw_conn = None
def connect(self):
if (self.cw_conn is None):
self.cw_conn = boto.ec2.cloudwatch.connect_to_region(self.region)
assert self.cw_conn, "Cannot get cloudwatch connection for region %s" % (self.region)
def get_queue_metrics(self, queue_name, metric_names = default_metrics, num_hours = 24):
print "getting {} for queue {} in region {}".format(metric_names, queue_name, self.region)
self.connect()
end = datetime.utcnow()
start = end - timedelta(hours=num_hours)
datas = []
for metric_name in metric_names:
print "getting metric {} for queue {}".format(metric_name, queue_name)
metrics = self.cw_conn.list_metrics(dimensions = dict(QueueName = queue_name),
namespace='AWS/SQS',
metric_name = metric_name)
query_result = metrics[0].query(start, end, 'Sum', 'Count', period = 300)
df = pd.DataFrame.from_records(query_result, index="Timestamp")
df = df.rename(columns = {'Sum': metric_name})
datas.append(df[metric_name])
return pd.concat(datas, axis=1)
def to_graph(self, metrics_df):
num_cols = len(metrics_df.columns)
for i in range(num_cols):
name = metrics_df.columns[i]
plt.subplot(num_cols, 1, i + 1)
metrics_df[name].plot(title=name)
plt.tight_layout() #pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
def to_summary(self, metrics_table):
print metrics_table.describe()
def to_csv(self, metrics_df):
sb = StringIO.StringIO()
metrics_df.to_csv(sb)
print sb.getvalue()
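# Hedged usage sketch (added; not part of the original script): driving the
# SqsMetrics class directly instead of going through the argparse front-end.
# The queue name and region below are placeholders, and valid AWS
# credentials are assumed at call time.
def example_direct_usage(queue_name="my-queue", region="us-east-1", hours=6):
    sm = SqsMetrics(region)
    metrics_df = sm.get_queue_metrics(queue_name, num_hours=hours)
    sm.to_summary(metrics_df)
    return metrics_df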
def main():
parser = argparse.ArgumentParser(description="Sqs visible message delta in last hour")
parser.add_argument("queue_name", help="name of queue to retrieve metrics for")
parser.add_argument("-metrics", help="comma delimited list of metrics to retrieve")
parser.add_argument("--region", help="aws region", default=os.environ['AWS_DEFAULT_REGION'])
parser.add_argument("--output", choices=["csv", "graph", "summary"], default="graph", help="how to output results")
parser.add_argument("--hours", type=int, help="num hours to retrieve", default=24)
args = parser.parse_args()
sm = SqsMetrics(args.region)
if args.metrics:
metrics_name = args.metrics.split(',')
else:
metrics_name = SqsMetrics.default_metrics
metrics = sm.get_queue_metrics(args.queue_name, metrics_name, args.hours)
{"csv": sm.to_csv,
"summary": sm.to_summary,
"graph": sm.to_graph
}[args.output](metrics)
if __name__ == "__main__":
main()
| apache-2.0 |
idaks/PW-explorer | PW_explorer/Input_Parsers/Clingo_Parser/clingo_parser.py | 1 | 6566 | from antlr4 import *
from .Antlr_Files.ClingoLexer import ClingoLexer
from .Antlr_Files.ClingoParser import ClingoParser
from .Antlr_Files.ClingoListener import ClingoListener
from ...helper import isfloat, PossibleWorld, Relation
import pandas as pd
import numpy as np
from antlr4.tree.Trees import Trees
def rearrangePWSandRLS(relations, pws):
"""
Sort the possible worlds and relations by their ids
:return: None
"""
relations.sort(key=lambda x: x.r_id)
pws.sort(key=lambda x: x.pw_id)
def loadIntoPandas(relations, pws, dfs):
"""
Populate the Pandas DF, one for each relation
:return: None
"""
for n, rl in enumerate(relations):
cls = ['pw']
cls.extend([str('x' + str(i)) for i in range(1, rl.arity + 1)])
rws = [] # could convert into numpy if sure it's all float/int
for m, pw in enumerate(pws):
if rl.relation_name in pw.rls:
rl_data_pw = []
for rl_data in pw.rls[rl.relation_name]:
rl_data_pw.append(rl_data.copy())
rl_data_pw[-1].insert(0, pw.pw_id)
rws.extend(rl_data_pw)
df = pd.DataFrame(rws, columns=cls)
dfs[rl.relation_name] = df
######################################################################################
######################################################################################
class AntlrClingoListener(ClingoListener):
def __init__(self):
self.pws = []
self.relations = []
self.expected_pws = 0
self.curr_pw = None
self.curr_pw_id = 1
self.curr_fact = None
self.curr_fact_data = None
self.curr_fact_depth = 0
self.n_facts = 0
self.dfs = {}
self.silent = False
def enterClingoOutput(self, ctx):
if ctx.OPTIMUM_FOUND() is not None:
if ctx.OPTIMUM_FOUND().getText() == 'UNSATISFIABLE':
if not self.silent:
print("The problem is unsatisfiable")
# print("enterClingoOutput")
def enterPw(self, ctx):
self.curr_pw = PossibleWorld(self.curr_pw_id)
# assert curr_pw.pw_id == int(ctx.TEXT(0).getText())
if ctx.TEXT(1) is not None:
self.curr_pw.pw_soln = float(ctx.TEXT(1).getText()) if isfloat(ctx.TEXT(1).getText()) else ctx.TEXT(1).getText()
def enterFact(self, ctx):
self.curr_fact_depth += 1
rel_name = ctx.TEXT().getText()
if self.curr_fact_depth == 1:
self.curr_fact = Relation(rel_name)
# Set defaults in case this is a 0-arity relation
self.curr_fact_data = []
else:
tmp_ptr = self.curr_fact_data
for _ in range(self.curr_fact_depth-2):
tmp_ptr = tmp_ptr[-1]
tmp_ptr.append([rel_name])
def enterFact_text(self, ctx:ClingoParser.Fact_textContext):
tmp_ptr = self.curr_fact_data
for _ in range(self.curr_fact_depth - 1):
tmp_ptr = tmp_ptr[-1]
tmp_ptr.append(ctx.TEXT().getText())
def exitFact(self, ctx):
if self.curr_fact_depth == 1:
self.curr_fact.arity = len(self.curr_fact_data)
rl_name_mod = str(self.curr_fact.relation_name + '_' + str(self.curr_fact.arity))
self.curr_fact.relation_name = rl_name_mod
foundMatch = False
for rl in self.relations:
if self.curr_fact.relation_name == rl.relation_name and self.curr_fact.arity == rl.arity:
self.curr_fact.r_id = rl.r_id
foundMatch = True
break
if not foundMatch:
newRl = Relation(self.curr_fact.relation_name)
newRl.arity = self.curr_fact.arity
newRl.r_id = self.n_facts
self.n_facts += 1
self.relations.append(newRl)
self.curr_fact.r_id = newRl.r_id
self.curr_pw.add_relation(self.curr_fact.relation_name, self.curr_fact_data)
self.curr_fact = None # could introduce bugs if passed by pointer in the upper statement, so be careful, use copy() if needed
self.curr_fact_data = None
self.curr_fact_depth -= 1
def exitPw(self, ctx):
self.pws.append(self.curr_pw) # again be wary, else use .copy()
self.curr_pw = None
self.curr_pw_id += 1
def enterOptimum(self, ctx):
optimum_found = ctx.TEXT().getText()
if optimum_found == 'yes':
if not self.silent:
print('Optimum Solution was found')
elif optimum_found == 'no':
if not self.silent:
print('Optimum Solution was not found')
else:
if not self.silent:
print('Unexpected Output:', optimum_found)
def enterOptimization(self, ctx):
opt_soln = ctx.TEXT().getText()
if not self.silent:
print('Optimized Solution is', opt_soln)
def enterModels(self, ctx):
num_models = ctx.TEXT().getText()
num_models = int(num_models)
if not self.silent:
print("Number of Models:", num_models)
self.expected_pws = num_models
def exitClingoOutput(self, ctx):
# loading into pandas DF
rearrangePWSandRLS(self.relations, self.pws)
loadIntoPandas(self.relations, self.pws, self.dfs)
######################################################################################
def __parse_clingo_output__(input_stream, silent=False, print_parse_tree=False):
lexer = ClingoLexer(input_stream)
# use this line to take input from the cmd line
# lexer = ClingoLexer(StdinStream())
ct_stream = CommonTokenStream(lexer)
parser = ClingoParser(ct_stream)
tree = parser.clingoOutput()
if print_parse_tree:
print(Trees.toStringTree(tree, None, parser))
pw_analyzer = AntlrClingoListener()
pw_analyzer.silent = silent
walker = ParseTreeWalker()
walker.walk(pw_analyzer, tree)
return pw_analyzer.dfs, pw_analyzer.relations, pw_analyzer.pws
def parse_clingo_output_from_file(fname, silent=False, print_parse_tree=False):
input_stream = FileStream(fname)
return __parse_clingo_output__(input_stream, silent, print_parse_tree)
def parse_clingo_output_from_string(clingo_output_string, silent=False, print_parse_tree=False):
input_stream = InputStream(clingo_output_string)
return __parse_clingo_output__(input_stream, silent, print_parse_tree)
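# Hedged usage sketch (added; not part of the original module): how the two
# entry points above are typically used.  The file name is hypothetical and
# the DataFrame layout follows loadIntoPandas (one frame per relation, with
# columns pw, x1..xN).
def example_usage(fname="clingo_output.txt"):
    dfs, relations, pws = parse_clingo_output_from_file(fname, silent=True)
    for rel in relations:
        df = dfs[rel.relation_name]
        print(rel.relation_name, "arity:", rel.arity, "rows:", len(df))
    return dfs, relations, pws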
| apache-2.0 |
shyamalschandra/scikit-learn | setup.py | 19 | 11460 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
from sklearn._build_utils import cythonize
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
cythonize.main(cwd)
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
        # For these actions, neither NumPy nor Cythonization is required.
        #
        # They must succeed without NumPy, for example when pip is used to
        # install scikit-learn before NumPy is present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
wronk/mne-python | mne/viz/_3d.py | 2 | 40857 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import base64
from itertools import cycle
import os.path as op
import warnings
import numpy as np
from scipy import linalg
from ..externals.six import string_types, advance_iterator
from ..io import _loc_to_coil_trans, Info
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..surface import (get_head_surf, get_meg_helmet_surf, read_surface,
transform_surface_to)
from ..transforms import (read_trans, _find_trans, apply_trans,
combine_transforms, _get_trans, _ensure_trans,
invert_transform, Transform)
from ..utils import get_subjects_dir, logger, _check_subject, verbose, warn
from ..fixes import _get_args
from ..defaults import _handle_default
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS, plt_show
from ..externals.six import BytesIO
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
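# Usage sketch (hypothetical file names; assumes the standard mne helpers
# read_evokeds and make_field_map):
#     >>> evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     >>> maps = mne.make_field_map(evoked, trans='sample-trans.fif',
#     ...                           subject='sample')
#     >>> plot_evoked_field(evoked, maps, time=0.1)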
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
                       slices=None, show=True, img_output=None):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
img_output : None | tuple
If tuple (width and height), images will be produced instead of a
single figure with many axes. This mode is designed to reduce the
(substantial) overhead associated with making tens to hundreds
of matplotlib axes, instead opting to re-use a single Axes instance.
Returns
-------
fig : Instance of matplotlib.figure.Figure | list
The figure. Will instead be a list of png images if
img_output is a tuple.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
if img_output is None:
fig, axs = _prepare_trellis(len(slices), 4)
else:
fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = [ax] * len(slices)
fig_size = fig.get_size_inches()
w, h = img_output[0], img_output[1]
w2 = fig_size[0]
fig.set_size_inches([(w2 / float(w)) * w, (w2 / float(w)) * h])
plt.close(fig)
inds = dict(coronal=[0, 1, 2], axial=[2, 0, 1],
sagittal=[2, 1, 0])[orientation]
outs = []
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
if img_output is not None:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
ax.tricontour(surf['rr'][:, inds[0]], surf['rr'][:, inds[1]],
surf['tris'], surf['rr'][:, inds[2]],
levels=[sl], colors='yellow', linewidths=2.0)
if img_output is not None:
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, img_output[1])
ax.set_ylim(img_output[0], 0)
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png')
outs.append(base64.b64encode(output.getvalue()).decode('ascii'))
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show)
return fig if img_output is None else outs
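# Example call (sketch; hypothetical FreeSurfer paths):
#     _plot_mri_contours('subjects/sample/mri/T1.mgz',
#                        ['subjects/sample/bem/inner_skull.surf',
#                         'subjects/sample/bem/outer_skull.surf',
#                         'subjects/sample/bem/outer_skin.surf'],
#                        orientation='axial')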
@verbose
def plot_trans(info, trans='auto', subject=None, subjects_dir=None,
ch_type=None, source=('bem', 'head'), coord_frame='head',
meg_sensors=False, eeg_sensors=True, dig=False, ref_meg=False,
verbose=None):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, no head
surface will be shown.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
        to 'bem'. Note: for single-layer BEMs it is recommended to use 'head'.
coord_frame : str
Coordinate frame to use, 'head', 'meg', or 'mri'.
meg_sensors : bool
If True, plot MEG sensors as points in addition to showing the helmet.
eeg_sensors : bool
If True, plot EEG sensors as points.
dig : bool
If True, plot the digitization points.
ref_meg : bool
If True (default False), include reference MEG sensors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
from ..forward import _create_meg_coils
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info, got %s'
% type(info))
if coord_frame not in ['head', 'meg', 'mri']:
        raise ValueError('coord_frame must be "head", "meg" or "mri"')
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
show_head = (subject is not None)
if isinstance(trans, string_types):
if trans == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
trans = _find_trans(subject, subjects_dir)
trans = read_trans(trans)
elif trans is None:
trans = Transform('head', 'mri', np.eye(4))
show_head = False
elif not isinstance(trans, dict):
raise TypeError('trans must be str, dict, or None')
head_mri_t = _ensure_trans(trans, 'head', 'mri')
del trans
# both the head and helmet will be in MRI coordinates after this
meg_picks = pick_types(info, meg=True, ref_meg=ref_meg)
surfs = dict()
if show_head:
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs['head'] = get_head_surf(subject, source=source,
subjects_dir=subjects_dir)
if (ch_type is None and len(meg_picks) > 0) or ch_type == 'meg':
surfs['helmet'] = get_meg_helmet_surf(info, head_mri_t)
if coord_frame == 'meg':
surf_trans = combine_transforms(info['dev_head_t'], head_mri_t,
'meg', 'mri')
elif coord_frame == 'head':
surf_trans = head_mri_t
else: # coord_frame == 'mri'
surf_trans = None
for key in surfs.keys():
surfs[key] = transform_surface_to(surfs[key], coord_frame, surf_trans)
del surf_trans
# determine points
meg_rrs, meg_tris = list(), list()
ext_loc = list()
car_loc = list()
eeg_loc = list()
if eeg_sensors and (ch_type is None or ch_type == 'eeg'):
eeg_loc = np.array([info['chs'][k]['loc'][:3]
for k in pick_types(info, meg=False, eeg=True)])
if len(eeg_loc) > 0:
# Transform EEG electrodes from head coordinates if necessary
if coord_frame == 'meg':
eeg_loc = apply_trans(invert_transform(info['dev_head_t']),
eeg_loc)
elif coord_frame == 'mri':
eeg_loc = apply_trans(head_mri_t, eeg_loc)
else:
# only warn if EEG explicitly requested, or EEG channels exist but
# no locations are provided
if (ch_type is not None or
len(pick_types(info, meg=False, eeg=True)) > 0):
warn('EEG electrode locations not found. Cannot plot EEG '
'electrodes.')
if meg_sensors:
coil_transs = [_loc_to_coil_trans(info['chs'][pick]['loc'])
for pick in meg_picks]
# Transform MEG coordinates from meg if necessary
trans = None
if coord_frame == 'head':
trans = info['dev_head_t']
elif coord_frame == 'mri':
trans = combine_transforms(info['dev_head_t'], head_mri_t,
'meg', 'mri')
coils = _create_meg_coils([info['chs'][pick] for pick in meg_picks],
acc='normal')
offset = 0
for coil, coil_trans in zip(coils, coil_transs):
rrs, tris = _sensor_shape(coil)
rrs = apply_trans(coil_trans, rrs)
if trans is not None:
rrs = apply_trans(trans, rrs)
meg_rrs.append(rrs)
meg_tris.append(tris + offset)
offset += len(meg_rrs[-1])
if len(meg_rrs) == 0:
            warn('MEG sensors not found. Cannot plot MEG locations.')
else:
meg_rrs = np.concatenate(meg_rrs, axis=0)
meg_tris = np.concatenate(meg_tris, axis=0)
if dig:
ext_loc = np.array([d['r'] for d in info['dig']
if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
car_loc = np.array([d['r'] for d in info['dig']
if d['kind'] == FIFF.FIFFV_POINT_CARDINAL])
if coord_frame == 'meg':
t = invert_transform(info['dev_head_t'])
ext_loc = apply_trans(t, ext_loc)
car_loc = apply_trans(t, car_loc)
elif coord_frame == 'mri':
ext_loc = apply_trans(head_mri_t, ext_loc)
car_loc = apply_trans(head_mri_t, car_loc)
if len(car_loc) == len(ext_loc) == 0:
warn('Digitization points not found. Cannot plot digitization.')
# do the plotting, surfaces then points
from mayavi import mlab
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
alphas = dict(head=1.0, helmet=0.5)
colors = dict(head=(0.6, 0.6, 0.6), helmet=(0.0, 0.0, 0.6))
for key, surf in surfs.items():
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[key], opacity=alphas[key])
datas = (eeg_loc, car_loc, ext_loc)
colors = ((1., 0., 0.), (1., 1., 0.), (1., 0.5, 0.))
alphas = (1.0, 0.5, 0.25)
scales = (0.005, 0.015, 0.0075)
for data, color, alpha, scale in zip(datas, colors, alphas, scales):
if len(data) > 0:
with warnings.catch_warnings(record=True): # traits
mlab.points3d(data[:, 0], data[:, 1], data[:, 2],
color=color, scale_factor=scale, opacity=alpha)
if len(meg_rrs) > 0:
color, alpha = (0., 0.25, 0.5), 0.25
mlab.triangular_mesh(meg_rrs[:, 0], meg_rrs[:, 1], meg_rrs[:, 2],
meg_tris, color=color, opacity=alpha)
mlab.view(90, 90)
return fig
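# Usage sketch (hypothetical paths; read_info is the standard mne.io helper):
#     >>> info = mne.io.read_info('sample_audvis_raw.fif')
#     >>> plot_trans(info, trans='sample-trans.fif', subject='sample',
#     ...            meg_sensors=True, dig=True)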
def _make_tris_fan(n_vert):
"""Helper to make tris given a number of vertices of a circle-like obj"""
tris = np.zeros((n_vert - 2, 3), int)
tris[:, 2] = np.arange(2, n_vert)
tris[:, 1] = tris[:, 2] - 1
return tris
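# For example, _make_tris_fan(4) fans a quad around vertex 0:
#     array([[0, 1, 2],
#            [0, 2, 3]])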
def _sensor_shape(coil):
"""Get the sensor shape vertices"""
rrs = np.empty([0, 2])
tris = np.empty([0, 3], int)
id_ = coil['type'] & 0xFFFF
if id_ in (2, 3012, 3013, 3011):
# square figure eight
# wound by right hand rule such that +x side is "up" (+z)
long_side = coil['size'] # length of long side (meters)
offset = 0.0025 # offset of the center portion of planar grad coil
rrs = np.array([
[offset, -long_side / 2.],
[long_side / 2., -long_side / 2.],
[long_side / 2., long_side / 2.],
[offset, long_side / 2.],
[-offset, -long_side / 2.],
[-long_side / 2., -long_side / 2.],
[-long_side / 2., long_side / 2.],
[-offset, long_side / 2.]])
tris = np.concatenate((_make_tris_fan(4),
_make_tris_fan(4) + 4), axis=0)
elif id_ in (2000, 3022, 3023, 3024):
# square magnetometer (potentially point-type)
size = 0.001 if id_ == 2000 else (coil['size'] / 2.)
rrs = np.array([[-1., 1.], [1., 1.], [1., -1.], [-1., -1.]]) * size
tris = _make_tris_fan(4)
elif id_ in (4001, 4003, 5002, 7002, 7003):
# round magnetometer
n_pts = 15 # number of points for circle
circle = np.exp(2j * np.pi * np.arange(n_pts) / float(n_pts))
circle = np.concatenate(([0.], circle))
circle *= coil['size'] / 2. # radius of coil
rrs = np.array([circle.real, circle.imag]).T
tris = _make_tris_fan(n_pts + 1)
elif id_ in (4002, 5001, 5003, 5004, 4004, 4005, 6001, 7001):
# round coil 1st order (off-diagonal) gradiometer
baseline = coil['base'] if id_ in (5004, 4005) else 0.
n_pts = 16 # number of points for circle
# This time, go all the way around circle to close it fully
circle = np.exp(2j * np.pi * np.arange(-1, n_pts) / float(n_pts - 1))
circle[0] = 0 # center pt for triangulation
circle *= coil['size'] / 2.
rrs = np.array([ # first, second coil
np.concatenate([circle.real + baseline / 2.,
circle.real - baseline / 2.]),
np.concatenate([circle.imag, -circle.imag])]).T
tris = np.concatenate([_make_tris_fan(n_pts + 1),
_make_tris_fan(n_pts + 1) + n_pts + 1])
# Go from (x,y) -> (x,y,z)
rrs = np.pad(rrs, ((0, 0), (0, 1)), mode='constant')
return rrs, tris
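# For instance, a point magnetometer (coil type 2000) is rendered as a 2 mm
# square: four corners at (+/-0.001, +/-0.001, 0) m, triangulated with
# _make_tris_fan(4).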
def _limits_to_control_points(clim, stc_data, colormap):
"""Private helper function to convert limits (values or percentiles)
to control points.
Note: If using 'mne', generate cmap control points for a directly
mirrored cmap for simplicity (i.e., no normalization is computed to account
for a 2-tailed mne cmap).
Parameters
----------
    clim : str | dict
        Desired limits, used to set the cmap control points.
    stc_data : ndarray
        The data from which percentile-based control points are computed.
    colormap : str
        The colormap name, or 'auto' to choose one based on ``clim``.
Returns
-------
    ctrl_pts : ndarray, shape (3,)
        Values to use as cmap control points.
colormap : str
The colormap.
"""
# Based on type of limits specified, get cmap control points
if colormap == 'auto':
if clim == 'auto':
colormap = 'mne' if (stc_data < 0).any() else 'hot'
else:
if 'lims' in clim:
colormap = 'hot'
else: # 'pos_lims' in clim
colormap = 'mne'
if clim == 'auto':
        # Set the control points from fixed percentiles of the absolute data
ctrl_pts = np.percentile(np.abs(stc_data), [96, 97.5, 99.95])
elif isinstance(clim, dict):
# Get appropriate key for clim if it's a dict
limit_key = ['lims', 'pos_lims'][colormap in ('mne', 'mne_analyze')]
if colormap != 'auto' and limit_key not in clim.keys():
raise KeyError('"pos_lims" must be used with "mne" colormap')
clim['kind'] = clim.get('kind', 'percent')
if clim['kind'] == 'percent':
ctrl_pts = np.percentile(np.abs(stc_data),
list(np.abs(clim[limit_key])))
elif clim['kind'] == 'value':
ctrl_pts = np.array(clim[limit_key])
if (np.diff(ctrl_pts) < 0).any():
                raise ValueError('value colormap limits must be '
                                 'nondecreasing')
else:
            raise ValueError('If clim is a dict, clim["kind"] must be '
                             '"value" or "percent"')
else:
raise ValueError('"clim" must be "auto" or dict')
if len(ctrl_pts) != 3:
raise ValueError('"lims" or "pos_lims" is length %i. It must be length'
' 3' % len(ctrl_pts))
ctrl_pts = np.array(ctrl_pts, float)
if len(set(ctrl_pts)) != 3:
if len(set(ctrl_pts)) == 1: # three points match
if ctrl_pts[0] == 0: # all are zero
warn('All data were zero')
ctrl_pts = np.arange(3, dtype=float)
else:
ctrl_pts *= [0., 0.5, 1] # all nonzero pts == max
else: # two points match
# if points one and two are identical, add a tiny bit to the
# control point two; if points two and three are identical,
# subtract a tiny bit from point two.
bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
return ctrl_pts, colormap
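# Example (sketch of the dict form): clim = dict(kind='percent',
# lims=[96, 97.5, 99.95]) yields ctrl_pts = np.percentile(np.abs(stc_data),
# [96, 97.5, 99.95]) and the 'hot' colormap, matching the 'auto' behaviour
# above; a 'pos_lims' key would select the two-tailed 'mne' colormap instead.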
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='time=%0.2f ms',
smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, config_opts=None,
subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto'):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display.
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use or a custom look up table. If array, must
        be an (n x 3) or (n x 4) array with RGB or RGBA values between
0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
based on whether 'lims' or 'pos_lims' are specified in `clim`.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
transparent : bool | None
If True, use a linear transparency between fmin and fmid.
None will choose automatically based on colormap type.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
clim : str | dict
Colorbar properties specification. If 'auto', set clim automatically
based on data percentiles. If dict, should contain:
``kind`` : str
Flag to specify type of limits. 'value' or 'percent'.
``lims`` : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is not 'mne'.
Left, middle, and right bound for colormap.
``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is 'mne'.
Left, middle, and right bound for colormap. Positive values
will be mirrored directly across zero during colormap
construction to obtain negative control points.
Returns
-------
brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
"""
from surfer import Brain, TimeViewer
config_opts = _handle_default('config_opts', config_opts)
import mayavi
from mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
# convert control points to locations in colormap
ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)
# Construct cmap manually if 'mne' and get cmap bounds
# and triage transparent argument
if colormap in ('mne', 'mne_analyze'):
colormap = mne_analyze_colormap(ctrl_pts)
scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
transparent = False if transparent is None else transparent
else:
scale_pts = ctrl_pts
transparent = True if transparent is None else transparent
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
subject = _check_subject(stc.subject, subject, True)
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = _get_args(Brain.__init__)
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertices[0])]
else:
data = stc.data[len(stc.vertices[0]):]
vertices = stc.vertices[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
fmax=scale_pts[2], transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
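# Usage sketch (hypothetical file stem; read_source_estimate is the standard
# mne reader):
#     >>> stc = mne.read_source_estimate('sample_audvis-meg')
#     >>> brain = plot_source_estimates(stc, subject='sample', hemi='both',
#     ...                               clim=dict(kind='value',
#     ...                                         lims=[3., 6., 9.]))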
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=('cone', 'sphere'),
scale_factors=(1, 0.6),
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
high_resolution : bool
If True, plot on the original (non-downsampled) cortical mesh.
    fig_name : str | None
        Mayavi figure name.
    fig_number : int | None
        Matplotlib figure number.
    labels : ndarray or list of ndarrays
        Labels used to show sources in clusters. Sources with the same
        label, and their waveforms, are presented in the same color.
        labels should be a list of ndarrays when stcs is a list,
        i.e. one label array per stc.
modes : list
Should be a list, with each entry being ``'cone'`` or ``'sphere'``
to specify how the dipoles should be shown.
scale_factors : list
List of floating point scale factors for the markers.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
**kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
known_modes = ['cone', 'sphere']
if not isinstance(modes, (list, tuple)) or \
not all(mode in known_modes for mode in modes):
raise ValueError('mode must be a list containing only '
'"cone" or "sphere"')
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
from mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
c=c, linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
plt_show(show)
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
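# Usage sketch: typically called on the output of a sparse solver such as
# mne.inverse_sparse.mixed_norm (hypothetical variable names):
#     >>> plot_sparse_source_estimates(forward['src'], stcs, fig_name='MxNE')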
def plot_dipole_locations(dipoles, trans, subject, subjects_dir=None,
bgcolor=(1, 1, 1), opacity=0.3,
brain_color=(1, 1, 0), fig_name=None,
fig_size=(600, 600), mode='cone',
scale_factor=0.1e-1, colors=None, verbose=None):
"""Plot dipole locations
Only the location of the first time point of each dipole is shown.
Parameters
----------
dipoles : list of instances of Dipole | Dipole
The dipoles to plot.
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
fig_name : str
Mayavi figure name.
fig_size : tuple of length 2
Mayavi figure size.
mode : str
Should be ``'cone'`` or ``'sphere'`` to specify how the
dipoles should be shown.
scale_factor : float
The scaling applied to amplitudes for the plot.
    colors : list of colors | None
Color to plot with each dipole. If None default colors are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
Notes
-----
.. versionadded:: 0.9.0
"""
from mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
trans = _get_trans(trans)[0]
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
fname = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
points, faces = read_surface(fname)
points = apply_trans(trans['trans'], points * 1e-3)
from .. import Dipole
if isinstance(dipoles, Dipole):
dipoles = [dipoles]
if mode not in ['cone', 'sphere']:
        raise ValueError('mode must be "cone" or "sphere"')
if colors is None:
colors = cycle(COLORS)
fig = mlab.figure(size=fig_size, bgcolor=bgcolor, fgcolor=(0, 0, 0))
with warnings.catch_warnings(record=True): # FutureWarning in traits
mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
faces, color=brain_color, opacity=opacity)
for dip, color in zip(dipoles, colors):
rgb_color = color_converter.to_rgb(color)
with warnings.catch_warnings(record=True): # FutureWarning in traits
mlab.quiver3d(dip.pos[0, 0], dip.pos[0, 1], dip.pos[0, 2],
dip.ori[0, 0], dip.ori[0, 1], dip.ori[0, 2],
opacity=1., mode=mode, color=rgb_color,
scalars=dip.amplitude.max(),
scale_factor=scale_factor)
if fig_name is not None:
mlab.title(fig_name)
if fig.scene is not None: # safe for Travis
fig.scene.x_plus_view()
return fig
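# Usage sketch (hypothetical file names; read_dipole/read_trans are the
# standard mne readers):
#     >>> dip = mne.read_dipole('sample_audvis_set1.dip')
#     >>> trans = mne.read_trans('sample-trans.fif')
#     >>> plot_dipole_locations(dip, trans, subject='sample')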
| bsd-3-clause |
imito/odin | examples/voxceleb/train_xvec.py | 1 | 7023 | from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
import os
os.environ['ODIN'] = 'float32,gpu'
import scipy.io
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from odin import training
from odin.utils import (args_parse, ctext, Progbar, as_tuple_of_shape,
crypto, stdio)
from odin import fuel as F, visual as V, nnet as N, backend as K
from utils import prepare_dnn_data, get_model_path, csv2mat
# ===========================================================================
# Configs
# ===========================================================================
args = args_parse([
('recipe', 'the name of function defined in feature_recipes.py', None),
('-feat', "Acoustic feature", ('mspec', 'mfcc'), 'mspec'),
('-batch', "batch size", None, 64),
('-epoch', "number of epoch", None, 25),
('-l', "audio segmenting length in second", None, 3),
('--debug', "enable debug mode", None, False),
('--train', "force continue training the saved model", None, False),
])
FEAT = args.feat
TRAIN_MODEL = args.train
DEBUG = bool(args.debug)
(EXP_DIR, MODEL_PATH, LOG_PATH,
TRAIN_PATH, TEST_PATH) = get_model_path('xvec', args)
stdio(LOG_PATH)
# ===========================================================================
# Create data feeder
# ===========================================================================
(train, valid,
test_ids, test_dat,
all_speakers) = prepare_dnn_data(
recipe=args.recipe, feat=FEAT, utt_length=args.l)
n_speakers = len(all_speakers) + 1
# ===========================================================================
# Create the network
# ===========================================================================
inputs = [K.placeholder(shape=(None,) + shape[1:],
dtype='float32',
name='input%d' % i)
for i, shape in enumerate(as_tuple_of_shape(train.shape))]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== the network ====== #
if os.path.exists(MODEL_PATH):
x_vec = N.deserialize(path=MODEL_PATH, force_restore_vars=True)
else:
TRAIN_MODEL = True
with N.args_scope(
['TimeDelayedConv', dict(time_pool='none', activation=K.relu)],
['Dense', dict(activation=K.linear, b_init=None)],
['BatchNorm', dict(activation=K.relu)]
):
x_vec = N.Sequence([
N.Dropout(level=0.3),
N.TimeDelayedConv(n_new_features=512, n_time_context=5),
N.TimeDelayedConv(n_new_features=512, n_time_context=5),
N.TimeDelayedConv(n_new_features=512, n_time_context=7),
N.Dense(512), N.BatchNorm(),
N.Dense(1500), N.BatchNorm(),
N.StatsPool(axes=1, output_mode='concat'),
N.Flatten(outdim=2),
N.Dense(512, name="LatentOutput"), N.BatchNorm(),
N.Dense(512), N.BatchNorm(),
N.Dense(n_speakers, activation=K.linear,
b_init=init_ops.constant_initializer(value=0))
], debug=1)
# ====== create outputs ====== #
y_logit = x_vec(X)
y_proba = tf.nn.softmax(y_logit)
z = K.ComputationGraph(y_proba).get(roles=N.Dense, scope='LatentOutput',
beginning_scope=False)[0]
print('Latent space:', ctext(z, 'cyan'))
# ====== create loss ====== #
ce = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=y_logit)
acc = K.metrics.categorical_accuracy(y_true=y, y_pred=y_proba)
# ====== params and optimizing ====== #
updates = K.optimizers.Adam(lr=0.0001, name='XAdam').minimize(
loss=ce,
roles=[K.role.TrainableParameter],
exclude_roles=[K.role.InitialState],
verbose=True)
K.initialize_all_variables()
# # ====== Functions ====== #
print('Building training functions ...')
f_train = K.function(inputs, [ce, acc], updates=updates,
training=True)
print('Building testing functions ...')
f_score = K.function(inputs, [ce, acc],
training=False)
# Latent spaces
f_z = K.function(inputs=X, outputs=z, training=False)
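# Note: f_z maps a batch of utterance segments to the 512-dim activations of
# the "LatentOutput" layer; these segment-level outputs are averaged per file
# below to form the final x-vector written to the CSV files.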
# ===========================================================================
# Create trainer
# ===========================================================================
if TRAIN_MODEL:
print('Start training ...')
task = training.MainLoop(batch_size=args.batch, seed=1234, shuffle_level=2,
allow_rollback=True)
task.set_checkpoint(MODEL_PATH, x_vec)
task.set_callbacks([
training.NaNDetector(),
training.EarlyStopGeneralizationLoss('valid', ce,
threshold=5, patience=5)
])
task.set_train_task(func=f_train, data=train,
epoch=args.epoch, name='train')
task.set_valid_task(func=f_score, data=valid,
freq=training.Timer(percentage=0.8),
name='valid')
task.run()
# ===========================================================================
# Saving the test data
# CSV separated by tab
# ===========================================================================
sep = '\t'
prog = Progbar(target=len(test_ids) + len(train) + len(valid),
print_summary=True, print_report=True,
name="Extracting x-vector")
with open(TRAIN_PATH, 'w') as f_train, open(TEST_PATH, 'w') as f_test:
# ====== save training set ====== #
for name, idx, X, y in train.set_batch(batch_size=8000,
batch_mode='file', seed=None):
assert idx == 0
y = np.argmax(y, axis=-1)
assert len(set(y)) == 1
y = y[0]
z = np.mean(f_z(X), axis=0, keepdims=False).astype('float32')
f_train.write(sep.join([str(y)] + [str(i) for i in z]) + '\n')
prog.add(X.shape[0])
# ====== save validation set ====== #
for name, idx, X, y in valid.set_batch(batch_size=8000,
batch_mode='file', seed=None):
assert idx == 0
y = np.argmax(y, axis=-1)
assert len(set(y)) == 1
y = y[0]
z = np.mean(f_z(X), axis=0, keepdims=False).astype('float32')
f_train.write(sep.join([str(y)] + [str(i) for i in z]) + '\n')
prog.add(X.shape[0])
# ====== save test set ====== #
for name, (start, end) in sorted(test_ids.items(),
key=lambda x: x[0]):
y = test_dat[start:end]
z = np.mean(f_z(y), axis=0, keepdims=False).astype('float32')
f_test.write(sep.join([name] + [str(i) for i in z]) + '\n')
prog.add(1)
# convert everything to matlab format
csv2mat(exp_dir=EXP_DIR)
# ===========================================================================
# Evaluate and save the log
# ===========================================================================
np.random.seed(87654321)
shape = inputs[0].shape
X = np.random.rand(64, shape[1].value, shape[2].value).astype('float32')
Z = f_z(X)
# ====== make sure model has the same identity ====== #
print(Z.shape, Z.sum(), (Z**2).sum(), Z.std())
print(ctext(crypto.md5_checksum(Z), 'cyan'))
| mit |
kagayakidan/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
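# For example, _color_brew(3) should return three (R, G, B) colors with hues
# spaced 120 degrees apart (roughly [229, 129, 57], [57, 229, 129] and
# [129, 57, 229]), all sharing the same saturation and value.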
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
mlyundin/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When fixed_n_classes is not None the first labeling is considered a ground
    truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
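# For example, uniform_labelings_scores(metrics.adjusted_rand_score, 100,
# [10]) returns a (1, n_runs) array whose entries hover around 0, since the
# ARI is adjusted for chance agreement between the two random labelings.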
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high-magnitude
variables (a so-called 'long tail') that could otherwise dominate results.
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
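# A small illustration of the ``strategy`` hyper-parameter mentioned in the
# docstring above (the tiny column of values is arbitrary); the missing entry
# is replaced by a different statistic of the observed values in each case:
#
#   demo_col = np.array([[1.], [2.], [2.], [10.], [np.nan]])
#   Imputer(strategy="mean").fit_transform(demo_col)[-1, 0]           # 3.75
#   Imputer(strategy="median").fit_transform(demo_col)[-1, 0]         # 2.0
#   Imputer(strategy="most_frequent").fit_transform(demo_col)[-1, 0]  # 2.0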
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
datapythonista/pandas | asv_bench/benchmarks/tslibs/offsets.py | 6 | 2207 | """
offsets benchmarks that rely only on tslibs. See benchmarks.offset for
offsets benchmarks that rely on other parts of pandas.
"""
from datetime import datetime
import numpy as np
from pandas import offsets
try:
import pandas.tseries.holiday
except ImportError:
pass
hcal = pandas.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [
offsets.Day(),
offsets.BYearEnd(),
offsets.BYearBegin(),
offsets.BQuarterEnd(),
offsets.BQuarterBegin(),
offsets.BMonthEnd(),
offsets.BMonthBegin(),
offsets.CustomBusinessDay(),
offsets.CustomBusinessDay(calendar=hcal),
offsets.CustomBusinessMonthBegin(calendar=hcal),
offsets.CustomBusinessMonthEnd(calendar=hcal),
offsets.CustomBusinessMonthEnd(calendar=hcal),
]
other_offsets = [
offsets.YearEnd(),
offsets.YearBegin(),
offsets.QuarterEnd(),
offsets.QuarterBegin(),
offsets.MonthEnd(),
offsets.MonthBegin(),
offsets.DateOffset(months=2, days=2),
offsets.BusinessDay(),
offsets.SemiMonthEnd(),
offsets.SemiMonthBegin(),
]
offset_objs = non_apply + other_offsets
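# For orientation, the kind of operations being timed below look like this
# (illustrative only, not part of the benchmark):
#   datetime(2011, 1, 1) + offsets.MonthEnd()               # Timestamp('2011-01-31 00:00:00')
#   offsets.MonthEnd().is_on_offset(datetime(2011, 1, 31))  # True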
class OnOffset:
params = offset_objs
param_names = ["offset"]
def setup(self, offset):
self.dates = [
datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31]
if not (m == 11 and d == 31)
]
def time_on_offset(self, offset):
for date in self.dates:
offset.is_on_offset(date)
class OffsetDatetimeArithmetic:
params = offset_objs
param_names = ["offset"]
def setup(self, offset):
self.date = datetime(2011, 1, 1)
self.dt64 = np.datetime64("2011-01-01 09:00Z")
def time_apply(self, offset):
offset.apply(self.date)
def time_apply_np_dt64(self, offset):
offset.apply(self.dt64)
def time_add(self, offset):
self.date + offset
def time_add_10(self, offset):
self.date + (10 * offset)
def time_subtract(self, offset):
self.date - offset
def time_subtract_10(self, offset):
self.date - (10 * offset)
| bsd-3-clause |
samuel1208/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
fjxhkj/PTVS | Python/Product/ML/ProjectTemplates/ClusteringTemplate/clustering.py | 18 | 10394 | '''
This script performs the basic process for applying a machine learning
algorithm to a dataset using Python libraries.
The four steps are:
1. Download a dataset (using pandas)
2. Process the numeric data (using numpy)
3. Train and evaluate learners (using scikit-learn)
4. Plot and compare results (using matplotlib)
The data is downloaded from URL, which is defined below. As is normal
for machine learning problems, the nature of the source data affects
the entire solution. When you change URL to refer to your own data, you
will need to review the data processing steps to ensure they remain
correct.
============
Example Data
============
The example is from http://archive.ics.uci.edu/ml/datasets/Water+Treatment+Plant
It contains a range of continuous values from sensors at a water
treatment plant, and the aim is to use unsupervised learners to
determine whether the plant is operating correctly. See the linked page
for more information about the data set.
This script uses unsupervised clustering learners and dimensionality
reduction models to find similar values, outliers, and visualize the
classes.
'''
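# Roughly, the functions below map onto the four steps above: download_data()
# covers step 1, get_features() step 2, evaluate_learners() (together with
# reduce_dimensions(), which prepares the 2-D views) step 3, and plot() step 4.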
# Remember to update the script for the new data when you change this URL
URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/water-treatment/water-treatment.data"
# Uncomment this call when using matplotlib to generate images
# rather than displaying interactive UI.
#import matplotlib
#matplotlib.use('Agg')
from pandas import read_table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
# [OPTIONAL] Seaborn makes plots nicer
import seaborn
except ImportError:
pass
# =====================================================================
def download_data():
'''
Downloads the data for this script into a pandas DataFrame.
'''
# If your data is in an Excel file, install 'xlrd' and use
# pandas.read_excel instead of read_table
#from pandas import read_excel
#frame = read_excel(URL)
# If your data is in a private Azure blob, install 'azure' and use
# BlobService.get_blob_to_path() with read_table() or read_excel()
#import azure.storage
#service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)
#service.get_blob_to_path(container_name, blob_name, 'my_data.csv')
#frame = read_table('my_data.csv', ...
frame = read_table(
URL,
# Uncomment if the file needs to be decompressed
#compression='gzip',
#compression='bz2',
# Specify the file encoding
# Latin-1 is common for data from US sources
encoding='latin-1',
#encoding='utf-8', # UTF-8 is also common
# Specify the separator in the data
sep=',', # comma separated values
#sep='\t', # tab separated values
#sep=' ', # space separated values
# Ignore spaces after the separator
skipinitialspace=True,
# Treat question marks as missing values
na_values=['?'],
# Generate row labels from each row number
index_col=None,
#index_col=0, # use the first column as row labels
#index_col=-1, # use the last column as row labels
# Generate column headers row from each column number
header=None,
#header=0, # use the first line as headers
# Use manual headers and skip the first row in the file
#header=0,
#names=['col1', 'col2', ...],
)
# Return a subset of the columns
#return frame[['col1', 'col4', ...]]
# Return the entire frame
#return frame
# Return all except the first column
del frame[frame.columns[0]]
return frame
# =====================================================================
def get_features(frame):
'''
Transforms and scales the input data and returns a numpy array that
is suitable for use with scikit-learn.
Note that in unsupervised learning there are no labels.
'''
# Replace missing values with 0.0
# or we can use scikit-learn to calculate missing values below
#frame[frame.isnull()] = 0.0
# Convert values to floats
arr = np.array(frame, dtype=np.float)
# Impute missing values from the mean of their entire column
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy='mean')
arr = imputer.fit_transform(arr)
# Normalize the entire data set to mean=0.0 and variance=1.0
from sklearn.preprocessing import scale
arr = scale(arr)
return arr
# =====================================================================
def reduce_dimensions(X):
'''
Reduce the dimensionality of X with different reducers.
Return a sequence of tuples containing:
(title, x coordinates, y coordinates)
for each reducer.
'''
# Principal Component Analysis (PCA) is a linear reduction model
# that identifies the components of the data with the largest
# variance.
from sklearn.decomposition import PCA
reducer = PCA(n_components=2)
X_r = reducer.fit_transform(X)
yield 'PCA', X_r[:, 0], X_r[:, 1]
# Independent Component Analysis (ICA) decomposes a signal by
# identifying the independent contributing sources.
from sklearn.decomposition import FastICA
reducer = FastICA(n_components=2)
X_r = reducer.fit_transform(X)
yield 'ICA', X_r[:, 0], X_r[:, 1]
# t-distributed Stochastic Neighbor Embedding (t-SNE) is a
# non-linear reduction model. It operates best on data with a low
# number of attributes (<50) and is often preceded by a linear
# reduction model such as PCA.
from sklearn.manifold import TSNE
reducer = TSNE(n_components=2)
X_r = reducer.fit_transform(X)
yield 't-SNE', X_r[:, 0], X_r[:, 1]
def evaluate_learners(X):
'''
Run multiple times with different learners to get an idea of the
relative performance of each configuration.
Returns a sequence of tuples containing:
(title, predicted classes)
for each learner.
'''
from sklearn.cluster import (MeanShift, MiniBatchKMeans,
SpectralClustering, AgglomerativeClustering)
learner = MeanShift(
# Let the learner use its own heuristic for determining the
# number of clusters to create
bandwidth=None
)
y = learner.fit_predict(X)
yield 'Mean Shift clusters', y
learner = MiniBatchKMeans(n_clusters=2)
y = learner.fit_predict(X)
yield 'K Means clusters', y
learner = SpectralClustering(n_clusters=2)
y = learner.fit_predict(X)
yield 'Spectral clusters', y
learner = AgglomerativeClustering(n_clusters=2)
y = learner.fit_predict(X)
yield 'Agglomerative clusters (N=2)', y
learner = AgglomerativeClustering(n_clusters=5)
y = learner.fit_predict(X)
yield 'Agglomerative clusters (N=5)', y
# =====================================================================
def plot(Xs, predictions):
'''
Create a plot comparing multiple learners.
`Xs` is a list of tuples containing:
(title, x coord, y coord)
`predictions` is a list of tuples containing
(title, predicted classes)
All the elements will be plotted against each other in a
two-dimensional grid.
'''
# We will use subplots to display the results in a grid
nrows = len(Xs)
ncols = len(predictions)
fig = plt.figure(figsize=(16, 8))
fig.canvas.set_window_title('Clustering data from ' + URL)
# Show each element in the plots returned from plt.subplots()
for row, (row_label, X_x, X_y) in enumerate(Xs):
for col, (col_label, y_pred) in enumerate(predictions):
ax = plt.subplot(nrows, ncols, row * ncols + col + 1)
if row == 0:
plt.title(col_label)
if col == 0:
plt.ylabel(row_label)
# Plot the decomposed input data and use the predicted
# cluster index as the value in a color map.
plt.scatter(X_x, X_y, c=y_pred.astype(np.float), cmap='prism', alpha=0.5)
# Set the axis tick formatter to reduce the number of ticks
ax.xaxis.set_major_locator(MaxNLocator(nbins=4))
ax.yaxis.set_major_locator(MaxNLocator(nbins=4))
# Let matplotlib handle the subplot layout
plt.tight_layout()
# ==================================
# Display the plot in interactive UI
plt.show()
# To save the plot to an image file, use savefig()
#plt.savefig('plot.png')
# Open the image file with the default image viewer
#import subprocess
#subprocess.Popen('plot.png', shell=True)
# To save the plot to an image in memory, use BytesIO and savefig()
# This can then be written to any stream-like object, such as a
# file or HTTP response.
#from io import BytesIO
#img_stream = BytesIO()
#plt.savefig(img_stream, fmt='png')
#img_bytes = img_stream.getvalue()
#print('Image is {} bytes - {!r}'.format(len(img_bytes), img_bytes[:8] + b'...'))
# Closing the figure allows matplotlib to release the memory used.
plt.close()
# =====================================================================
if __name__ == '__main__':
# Download the data set from URL
print("Downloading data from {}".format(URL))
frame = download_data()
# Process data into a feature array
# This is unsupervised learning, and so there are no labels
print("Processing {} samples with {} attributes".format(len(frame.index), len(frame.columns)))
X = get_features(frame)
# Run multiple dimensionality reduction algorithms on the data
print("Reducing dimensionality")
Xs = list(reduce_dimensions(X))
# Evaluate multiple clustering learners on the data
print("Evaluating clustering learners")
predictions = list(evaluate_learners(X))
# Display the results
print("Plotting the results")
plot(Xs, predictions)
| apache-2.0 |
procoder317/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
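# Illustrative check with hypothetical numbers: for a 50% inlier ratio
# (n_inliers=50, n_samples=100), min_samples=2 and probability=0.99,
# ceil(log(0.01) / log(1 - 0.5 ** 2)) == 17, i.e. about 17 random subsets
# are needed before one of them is outlier-free with 99% confidence:
#
#   _dynamic_max_trials(50, 100, 2, 0.99)   # -> 17.0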
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
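    A minimal usage sketch on toy data (the last sample is an obvious
    outlier)::

        from sklearn.linear_model import RANSACRegressor

        X = [[0.], [1.], [2.], [3.], [100.]]
        y = [0., 1., 2., 3., -50.]
        ransac = RANSACRegressor(random_state=0).fit(X, y)
        ransac.inlier_mask_     # the outlier is flagged as False
        ransac.predict([[4.]])  # approximately [4.], unaffected by the outlier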
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
ehogan/iris | docs/iris/src/conf.py | 5 | 10757 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# -*- coding: utf-8 -*-
#
# Iris documentation build configuration file, created by
# sphinx-quickstart on Tue May 25 13:26:23 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# add some sample files from the developers guide..
sys.path.append(os.path.abspath(os.path.join('developers_guide')))
# -- General configuration -----------------------------------------------------
# Temporary value for use by LaTeX and 'man' output.
# Deleted at the end of the module.
_authors = ('Byron Blay', 'Ed Campbell', 'Philip Elson', 'Richard Hattersley',
'Bill Little')
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
# better class documentation
'custom_class_autodoc',
# Data instance __repr__ filter.
'custom_data_autodoc',
'gen_example_directory',
'generate_package_rst',
'gen_gallery',
# Add labels to figures automatically
'auto_label_figures',
]
# list of packages to document
autopackage_name = ['iris']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Iris'
# define the copyright information for latex builds. Note, for html builds,
# the copyright exists directly inside "_templates/layout.html"
copyright = u'British Crown Copyright 2010 - 2015, Met Office'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import iris
# The short X.Y version.
if iris.__version__ == 'dev':
version = 'dev'
else:
# major.feature(.minor)-dev -> major.minor
version = '.'.join(iris.__version__.split('-')[0].split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = iris.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['sphinxext', 'build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Define the default highlight language. This also allows the >>> removal
# javascript (copybutton.js) to function.
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['iris']
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
'cartopy': ('http://scitools.org.uk/cartopy/docs/latest/', None),
'biggus': ('https://biggus.readthedocs.io/en/latest/', None),
}
# -- Doctest ------------------------------------------------------------------
doctest_global_setup = 'import iris'
# -- Autodoc ------------------------------------------------------------------
autodoc_member_order = 'groupwise'
autodoc_default_flags = ['show-inheritance']
# include the __init__ method when documenting classes
# document the init/new method at the top level of the class documentation rather than displaying the class docstring
autoclass_content='init'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Irisdoc'
html_use_modindex = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'Iris.tex', u'Iris Documentation', ' \\and '.join(_authors), 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
latex_elements = {}
latex_elements['docclass'] = 'MO_report'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iris', u'Iris Documentation', _authors, 1)
]
##########################
# plot directive options #
##########################
plot_formats = [('png', 100),
#('hires.png', 200), ('pdf', 250)
]
# Delete the temporary value.
del _authors
| lgpl-3.0 |
gpospelov/BornAgain | Examples/varia/AllFormFactorsAvailable.py | 1 | 3637 | """
All form factors available in BornAgain in the Born Approximation
"""
import numpy
import bornagain as ba
import ba_plot
from bornagain import deg, angstrom
from matplotlib import pyplot as plt
phi_min, phi_max = -2.0, 2.0
alpha_min, alpha_max = 0.0, 2.0
formfactors = [
ba.FormFactorAnisoPyramid(20.0, 16.0, 13.0, 60.0*deg),
ba.FormFactorBox(20.0, 16.0, 13.0),
ba.FormFactorCantellatedCube(15.0, 6.0),
ba.FormFactorCone(10.0, 13.0, 60.0*deg),
ba.FormFactorCone6(10.0, 13.0, 60.0*deg),
ba.FormFactorCuboctahedron(20.0, 13.0, 0.7, 60.0*deg),
ba.FormFactorCylinder(8.0, 16.0),
ba.FormFactorDodecahedron(5.0),
ba.FormFactorEllipsoidalCylinder(8.0, 13.0, 16.0),
ba.FormFactorFullSphere(8.0),
ba.FormFactorFullSpheroid(10.0, 13.0),
ba.FormFactorHemiEllipsoid(10.0, 6.0, 8.0),
ba.FormFactorIcosahedron(8.0),
ba.FormFactorPrism3(10.0, 13.0),
ba.FormFactorPrism6(5.0, 11.0),
ba.FormFactorPyramid(18.0, 13.0, 60.0*deg),
ba.FormFactorCosineRippleBox(27.0, 20.0, 14.0),
ba.FormFactorSawtoothRippleBox(36.0, 25.0, 14.0, 3.0),
ba.FormFactorTetrahedron(15.0, 6.0, 60.0*deg),
ba.FormFactorTruncatedCube(15.0, 6.0),
ba.FormFactorTruncatedSphere(5.0, 7.0, 0),
ba.FormFactorTruncatedSpheroid(7.5, 9.0, 1.2, 0),
]
def get_sample(formfactor):
"""
Returns a one-layer sample that contains particles with given form factor.
"""
# defining materials
m_vacuum = ba.HomogeneousMaterial("Vacuum", 0.0, 0.0)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
particle = ba.Particle(m_particle, formfactor)
particle_layout = ba.ParticleLayout()
particle_layout.addParticle(particle, 1.0)
vacuum_layer = ba.Layer(m_vacuum)
vacuum_layer.addLayout(particle_layout)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(vacuum_layer)
return multi_layer
def get_simulation(sample):
"""
Returns GISAXS simulation with standard beam and detector.
"""
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(100, phi_min*deg, phi_max*deg, 100,
alpha_min*deg, alpha_max*deg)
simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
simulation.setSample(sample)
return simulation
def simulate(ff):
"""
Runs simulation for one form factor, and returns simulated intensity pattern
"""
sample = get_sample(ff)
simulation = get_simulation(sample)
simulation.runSimulation()
return simulation.result()
def simulate_and_plot():
"""
Run simulation one by one for every form factor from the list and plot results
on a single canvas
"""
fig = plt.figure(figsize=(12.80, 10.24))
for nplot, ff in enumerate(formfactors):
name = ff.__class__.__name__
name = name.replace("FormFactor", "")
print("Generating intensity map in BA for '{0}'".format(name))
result = simulate(ff)
# showing the result
plt.subplot(5, 5, nplot + 1)
plt.subplots_adjust(wspace=0.3, hspace=0.3)
ba_plot.plot_colormap(result, xlabel="", ylabel="", zlabel="")
plt.tick_params(axis='both', which='major', labelsize=8)
plt.tick_params(axis='both', which='minor', labelsize=6)
plt.xticks(numpy.arange(phi_min, phi_max + 0.0001, 1.0))
plt.text(-0.1,
2.15,
name,
horizontalalignment='center',
verticalalignment='center',
fontsize=9)
plt.show()
if __name__ == '__main__':
simulate_and_plot()
| gpl-3.0 |
davidgbe/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
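        # Classic perceptron update: whenever a sample is misclassified,
        # nudge the decision hyperplane towards it by adding y_i * x_i to
        # the weights and y_i to the bias.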
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/decomposition/tests/test_nmf.py | 21 | 17922 | import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
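    For reference, the beta-divergence between scalar entries x and y is

        d_beta(x, y) = (x**beta + (beta - 1) * y**beta
                        - beta * x * y**(beta - 1)) / (beta * (beta - 1))

    for beta not in {0, 1}, with the limiting cases

        beta = 1 (generalized Kullback-Leibler): x * log(x / y) - x + y
        beta = 0 (Itakura-Saito):                x / y - log(x / y) - 1

    summed over all entries; beta = 2 is half the squared Frobenius norm
    of X - WH.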
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
        # Compare with an almost identical beta_loss: some values trigger specific
        # code paths, but the results should be continuous w.r.t. beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
    # Test that the output has no NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
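# Illustrative sketch (not part of the test suite): the warm-start pattern above can
# also be used outside the tests to monitor convergence manually, e.g. (assuming X,
# W0, H0 are defined as in test_nmf_decreasing):
#
#   W, H = W0.copy(), H0.copy()
#   losses = []
#   for _ in range(10):
#       W, H, _ = non_negative_factorization(
#           X, W, H, n_components=W.shape[1], init='custom', update_H=True,
#           solver='mu', beta_loss=1., max_iter=1, tol=0.)
#       losses.append(nmf._beta_divergence(X, W, H, 1.))
#   # `losses` should be non-increasing, up to numerical noise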
| mit |
ALDepp/python_toolbox | plot_budget_terms.py | 1 | 14489 | #!/usr/bin/env python
from netCDF4 import Dataset
import numpy as np
from numpy import inf
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import maskoceans
import os
import pickle
import shelve
import copy
import math
import glob
import matplotlib.lines as mlines
termspath='/net/bhw435/nobackup_1/users/deppenme/sens_exp/a01d/buget_terms/'
region = 'ab' # ab or itcz
#get all data
#SST bias term
SSTbiasin = shelve.open(termspath + 'sst_daily_bias_ece-era.dat', 'r')
tos_bias = SSTbiasin['tos_bias_' + region]
SSTbiasin.close()
tos_bias_monthly = np.asarray((np.nanmean(tos_bias[:31]),np.nanmean(tos_bias[31:62]),np.nanmean(tos_bias[62:93]),np.nanmean(tos_bias[93:])))
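# The day slices used above and below ([:31], [31:62], [62:93], [93:]) are repeated for
# every daily series; a small (hypothetical) helper would express the May-August
# aggregation in one place:
#
#   def monthly_means(daily):
#       bounds = (0, 31, 62, 93, None)
#       return np.asarray([np.nanmean(daily[b0:b1])
#                          for b0, b1 in zip(bounds[:-1], bounds[1:])])
#
#   tos_bias_monthly = monthly_means(tos_bias)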
#Q terms
Qin = shelve.open(termspath + 'cumsum_deltaK_duetoQ.dat', 'r')
dT_ece = Qin['dT_ece_' + region] #this is the temp change due to Q
dT_era = Qin['dT_era_' + region]
Qin.close()
dT_ece_monthly = np.asarray((np.nanmean(dT_ece[:31]),np.nanmean(dT_ece[31:62]),np.nanmean(dT_ece[62:93]),np.nanmean(dT_ece[93:])))
dT_era_monthly = np.asarray((np.nanmean(dT_era[:31]),np.nanmean(dT_era[31:62]),np.nanmean(dT_era[62:93]),np.nanmean(dT_era[93:])))
bias_q = dT_ece - dT_era
bias_q_monthly = np.asarray((np.nanmean(bias_q[:31]),np.nanmean(bias_q[31:62]),np.nanmean(bias_q[62:93]),np.nanmean(bias_q[93:])))
#SST dev term
SSTdevelopmentin = shelve.open(termspath + 'devSST_' + region + '.dat', 'r')
dcum_ece_ave = SSTdevelopmentin['dcum_ece_ave']
dcum_era_ave = SSTdevelopmentin['dcum_era_ave']
SSTdevelopmentin.close()
dcum_ece_ave_monthly = np.asarray((np.nanmean(dcum_ece_ave[:31]),np.nanmean(dcum_ece_ave[31:62]),np.nanmean(dcum_ece_ave[62:93]),np.nanmean(dcum_ece_ave[93:])))
dcum_era_ave_monthly = np.asarray((np.nanmean(dcum_era_ave[:31]),np.nanmean(dcum_era_ave[31:62]),np.nanmean(dcum_era_ave[62:93]),np.nanmean(dcum_era_ave[93:])))
bias_sst = dcum_ece_ave - dcum_era_ave
#horizontal advection terms
adv = shelve.open(termspath + 'advection_terms_uv_' + region + '.dat','r')
u_ece = np.asarray(adv['u_adv_terms_' + region + '_ece'])
v_ece = np.asarray(adv['v_adv_terms_' + region + '_ece'])
u_ora = np.asarray(adv['u_adv_terms_' + region + '_ora'])
v_ora = np.asarray(adv['v_adv_terms_' + region + '_ora'])
adv.close()
uv_ece = np.cumsum((-u_ece))+np.cumsum((-v_ece))
uv_ora = np.cumsum((-u_ora))+np.cumsum((-v_ora))
bias_uv_ece_ora = uv_ece - uv_ora
#upwelling term
upw_ece = shelve.open(termspath + 'W_below_mxl_divH_ece_' + region + '_mesh.dat')
W_ece = upw_ece['W_' + region]
upw_ece.close()
contr_w_ece = np.cumsum(-W_ece)
upw_ora = shelve.open(termspath + 'W_below_mxl_divH_ora_' + region + '_ORAmesh.dat')
W_ora = upw_ora['W_' + region]
upw_ora.close()
contr_w_ora = np.cumsum(-W_ora)
#biases and monthly means
rest_term_ece = dcum_ece_ave_monthly - uv_ece - (-W_ece) - dT_ece_monthly
rest_term_era = dcum_era_ave_monthly - uv_ora - (-W_ora) - dT_era_monthly
bias_uv_q = bias_uv_ece_ora + bias_q_monthly
bias_upwelling = contr_w_ece - contr_w_ora
#add the biases
bias_tot_all_terms = bias_upwelling + bias_q_monthly + bias_uv_ece_ora
#get the ave monthly sst bias
bias_sst_monthly = np.asarray((np.nanmean(bias_sst[:31]),np.nanmean(bias_sst[31:62]),np.nanmean(bias_sst[62:93]),np.nanmean(bias_sst[93:])))
rest_term_bias = rest_term_ece - rest_term_era
#plot for presentation
#legend bbox_to_anchor: the first value moves the legend to the right, the second moves it up
#one by one
#nb1
# only dcum sst development
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,label=r'EC-E',color='blue',lw=2)
plt.plot(range(122),dcum_era_ave,label=r'ERA',color='red',lw=2)
plt.plot(range(123),dT_ece,label=r'Q ECE',color='blue',ls='-.',lw=1.5,alpha=0)
plt.plot(range(123),dT_era,label=r'Q ERA',color='red',ls='-.',lw=1.5,alpha=0)
plt.legend(bbox_to_anchor=(0.20, 0.18, 0, 0.05),prop={'size':14},fancybox=True)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region.upper() + '_1_dz_test.pdf',dpi=500, bbox_inches='tight')
plt.close()
#nb2
# dcum sst development and contribution of q
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-6,2)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,label=r'EC-E',color='blue',lw=2)
plt.plot(range(122),dcum_era_ave,label=r'ERA',color='red',lw=2)
plt.plot(range(123),dT_ece,label=r'Q ECE',color='blue',ls='-.',lw=2.5)
plt.plot(range(123),dT_era,label=r'Q ERA',color='red',ls='-.',lw=2.5)
plt.legend(bbox_to_anchor=(0.20, 0.18, 0, 0.05),prop={'size':14},fancybox=True)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region.upper() + '_2_dz.pdf',dpi=500, bbox_inches='tight')
plt.close()
#nb3 add advection information
#these are the pure advection terms, but for their contribution to dT/dt they must be multiplied by (-1)
#dcum sst and q in the background, U and V apart
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-6,2)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,color='blue',alpha=0.2,lw=2)
plt.plot(range(122),dcum_era_ave,color='red',alpha=0.2,lw=2)
plt.plot(range(123),dT_ece,color='blue',ls='-.',lw=2.5,alpha=0.2)
plt.plot(range(123),dT_era,color='red',ls='-.',lw=2.5,alpha=0.2)
plt.scatter((13,44,76,106),np.cumsum((-u_ece)),label='ECE',color='blue',marker=r'$\mathrm{U}$',s=150)
plt.scatter((13,44,76,106),np.cumsum((-v_ece)),label='ECE',color='blue',marker=r'$\mathrm{V}$',s=150)
plt.scatter((16,47,76,106),np.cumsum((-u_ora)),label='ORA',color='red',marker=r'$\mathrm{U}$',s=150)
plt.scatter((16,47,76,106),np.cumsum((-v_ora)),label='ORA',color='red',marker=r'$\mathrm{V}$',s=150)
plt.plot((15,46,76,107),np.cumsum((-u_ece)),color='blue',lw=1.5,ls=':',alpha=0.4)
plt.plot((15,46,76,107),np.cumsum((-v_ece)),color='blue',lw=1.5,ls=':',alpha=0.4)
plt.plot((15,46,76,107),np.cumsum((-u_ora)),color='red',lw=1.5,ls=':',alpha=0.4)
plt.plot((15,46,76,107),np.cumsum((-v_ora)),color='red',lw=1.5,ls=':',alpha=0.4)
plt.legend(bbox_to_anchor=(0.18, 0.18, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region.upper() + '_3_dz.pdf',dpi=500, bbox_inches='tight')
plt.close()
#nb4 with actual contribution of u and v
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-6,2)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,color='blue',alpha=0.2,lw=2)
plt.plot(range(122),dcum_era_ave,color='red',alpha=0.2,lw=2)
plt.plot(range(123),dT_ece,color='blue',ls='-.',lw=2.5,alpha=0.2)
plt.plot(range(123),dT_era,color='red',ls='-.',lw=2.5,alpha=0.2)
plt.scatter((13,44,74,104),uv_ece,label='ECE',color='blue',marker=r'$\mathrm{UV}$',s=330)
plt.scatter((16,47,77,107),uv_ora,label='ORA',color='red',marker=r'$\mathrm{UV}$',s=330)
plt.plot((13,44,74,104),(np.cumsum((-u_ece))+np.cumsum((-v_ece))),color='blue',lw=0.5,ls=':',alpha=0.4)
plt.plot((16,47,77,107),(np.cumsum((-u_ora))+np.cumsum((-v_ora))),color='red',lw=0.5,ls=':',alpha=0.4)
plt.legend(bbox_to_anchor=(0.18, 0.22, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region + '_4_dz.pdf',dpi=500, bbox_inches='tight')
plt.close()
#nb 5
#--------- put upwelling in
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-6,2)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,color='blue',alpha=0.2,lw=2)
plt.plot(range(122),dcum_era_ave,color='red',alpha=0.2,lw=2)
plt.plot(range(123),dT_ece,color='blue',ls='-.',lw=2.5,alpha=0.2)
plt.plot(range(123),dT_era,color='red',ls='-.',lw=2.5,alpha=0.2)
plt.scatter((13,44,74,104),(np.cumsum((-u_ece))+np.cumsum((-v_ece))),color='blue',marker=r'$\mathrm{UV}$',s=250,alpha=0.2)
plt.scatter((16,47,77,107),(np.cumsum((-u_ora))+np.cumsum((-v_ora))),color='red',marker=r'$\mathrm{UV}$',s=250,alpha=0.2)
plt.plot((13,44,74,104),(np.cumsum((-u_ece))+np.cumsum((-v_ece))),color='blue',lw=0.5,ls=':',alpha=0.2)
plt.plot((16,47,77,107),(np.cumsum((-u_ora))+np.cumsum((-v_ora))),color='red',lw=0.5,ls=':',alpha=0.2)
plt.scatter((13,44,74,104),contr_w_ece,label='ECE',color='blue',marker=r'$\mathrm{W}$',s=250)
plt.plot((13,44,74,104),contr_w_ece,color='blue',lw=0.5,ls='--',alpha=0.4)
plt.scatter((13,44,74,104),contr_w_ora,label='ORA',color='red',marker=r'$\mathrm{W}$',s=250)
plt.plot((13,44,74,104),contr_w_ora,color='red',lw=0.5,ls='--',alpha=0.4)
plt.legend(bbox_to_anchor=(0.18, 0.2, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,3.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region + '_5_dz_MESH.pdf',dpi=500, bbox_inches='tight')
plt.close()
# nb 6
#--------- put rest term in
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-6,2)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(122),dcum_ece_ave,color='blue',alpha=0.2,lw=2)
plt.plot(range(122),dcum_era_ave,color='red',alpha=0.2,lw=2)
plt.plot(range(123),dT_ece,color='blue',ls='-.',lw=2.5,alpha=0.2)
plt.plot(range(123),dT_era,color='red',ls='-.',lw=2.5,alpha=0.2)
plt.scatter((13,44,74,104),(np.cumsum((-u_ece))+np.cumsum((-v_ece))),color='blue',marker=r'$\mathrm{UV}$',s=250,alpha=0.2)
plt.scatter((16,47,77,107),(np.cumsum((-u_ora))+np.cumsum((-v_ora))),color='red',marker=r'$\mathrm{UV}$',s=250,alpha=0.2)
plt.plot((13,44,74,104),(np.cumsum((-u_ece))+np.cumsum((-v_ece))),color='blue',lw=0.5,ls=':',alpha=0.2)
plt.plot((16,47,77,107),(np.cumsum((-u_ora))+np.cumsum((-v_ora))),color='red',lw=0.5,ls=':',alpha=0.2)
plt.scatter((13,44,74,104),contr_w_ece,color='blue',marker=r'$\mathrm{W}$',s=250,alpha=0.2)
plt.plot((13,44,74,104),contr_w_ece,color='blue',lw=0.5,ls='--',alpha=0.4)
plt.scatter((13,44,74,104),contr_w_ora,color='red',marker=r'$\mathrm{W}$',s=250,alpha=0.2)
plt.plot((13,44,74,104),contr_w_ora,color='red',lw=0.5,ls='--',alpha=0.4)
plt.scatter((13,44,74,104),rest_term_ece,label='ECE',color='blue',marker=r'$\mathrm{R^*}$',s=250)
plt.plot((13,44,74,104),rest_term_ece,color='blue',lw=0.5,ls='--',alpha=0.4)
plt.scatter((13,44,74,104),rest_term_era,label='ORA',color='red',marker=r'$\mathrm{R^*}$',s=250)
plt.plot((13,44,74,104),rest_term_era,color='red',lw=0.5,ls='--',alpha=0.4)
plt.legend(bbox_to_anchor=(0.18, 0.2, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region + '_6_dz_MESH.pdf',dpi=500, bbox_inches='tight')
plt.close()
# nb 7
# - ---- put bias upwelling in
# this is the version with the summed UV+W+Q star marker, not the preferred one (nb 8 uses the rest term)
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-4.5,2.5)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(123),tos_bias,color='green',lw=2.5,label='SST')
plt.scatter((15,46,76,107),bias_q_monthly,color='green',marker=r'$\mathrm{Q}$',s=250,alpha=0.4)
plt.plot(range(123),bias_q,color='green',lw=2.5,ls='-.',label='Q',alpha=0.4)
plt.scatter((15,46,76,107),bias_uv_ece_ora,label='UV',color='green',marker=r'$\mathrm{UV}$',s=250,alpha=0.4)
plt.plot((15,46,76,107),bias_uv_ece_ora,color='green',lw=0.5,ls=':',alpha=0.4)
plt.scatter((15,46,76,107),bias_upwelling,label='W',color='green',marker=r'$\mathrm{W}$',s=200,alpha=0.4)
plt.plot((15,46,76,107),bias_upwelling,color='green',lw=0.5,ls='--',alpha=0.4)
plt.scatter((15,46,76,107),bias_tot_all_terms,label='UV+W+Q',marker='*',s=250,color='black',alpha=0.8)
plt.legend(bbox_to_anchor=(0.25, 0.25, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ biases in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,2.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region + '_7_dz_MESH.pdf',dpi=500, bbox_inches='tight')
plt.close()
# nb 8
#this is with the rest term
plt.figure(figsize=(10,8))
plt.xlim(-1,124)
plt.ylim(-4.5,6.5)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.plot(range(123),tos_bias,color='green',lw=2.5,label='SST')
plt.scatter((15,46,76,107),bias_q_monthly,color='green',marker=r'$\mathrm{Q}$',s=250)
plt.plot(range(123),bias_q,color='green',lw=2.5,ls='-.',label='Q')
plt.scatter((15,46,76,107),bias_uv_ece_ora,label='UV',color='green',marker=r'$\mathrm{UV}$',s=250)
plt.plot((15,46,76,107),bias_uv_ece_ora,color='green',lw=0.5,ls=':',alpha=0.4)
plt.scatter((15,46,76,107),bias_upwelling,label='W',color='green',marker=r'$\mathrm{W}$',s=200)
plt.plot((15,46,76,107),bias_upwelling,color='green',lw=0.5,ls='--',alpha=0.4)
plt.scatter((15,46,76,107),rest_term_bias,label=r'$R^*$',marker=r'$\mathrm{R^*}$',s=250,color='green')
plt.plot((15,46,76,107),rest_term_bias,lw=0.5,ls='--',alpha=0.4,color='green')
plt.legend(bbox_to_anchor=(0.18, 0.24, 0, 0.05),prop={'size':14},fancybox=True,scatterpoints = 1)
plt.title(r'$\int_{}^{}{\frac{\mathrm{d}SST}{\mathrm{d}t}}$ biases in ' + region.upper() + ' box',fontsize=16)
plt.xlabel('Days from 05-01', fontsize=16)
plt.ylabel(r"$\Delta$ SST [K]", fontsize=16)
for i in np.arange(-6,6.5,0.5):
plt.axhline(i,color='gray',alpha=0.1,ls='--')
plt.savefig('SSTbiasdev_' + region + '_8_dz_MESH.pdf',dpi=500, bbox_inches='tight')
plt.close()
| mit |
jorik041/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
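# Minimal usage sketch of the sampler exercised above (values are illustrative):
#
#   from sklearn.utils.random import sample_without_replacement
#   idx = sample_without_replacement(100, 10, method="reservoir_sampling",
#                                    random_state=0)
#   # `idx` holds 10 distinct integers drawn from range(100)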
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
rupakc/Kaggle-Compendium | Job Salary Prediction/preprocess.py | 4 | 9418 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 22:25:49 2016
Preprocessing Utilities for cleaning and processing of data
@author: Rupak Chakraborty
"""
import pandas as pd
from nltk.corpus import stopwords
import string
from nltk.stem import PorterStemmer
stopword_list = set(stopwords.words("english"))
punctuation_list = list(string.punctuation)
ps = PorterStemmer()
months_list = ["january","february","march","april","may","june","july","august",
"september","october","november","december"]
digit_list = ["0","1","2","3","4","5","6","7","8","9"]
month_list_short = ["jan","feb","mar","apr","may","jun","jul","aug","sept","oct","nov","dec"]
emoticon_list = [":)",":(","^_^","-_-","<3",":D",":P",":/"]
html_tag_list = [" ","<",">","&",";","<strong>","<em>","[1]","</strong>","</em>","<div>","</div>","<b>","</b>","[2]","[3]","...","[img]","[/img]","<u>","</u>","<p>","</p>","\n","\\t","<span>",
"</span>","[Moved]","<br/>","<a>","</a>",""","<br>","<br />","Â","<a rel=\"nofollow\" class=\"ot-hashtag\"","'","<a","’","'"]
extend_punct_list = [' ',',',':',';','\'','\t','\n','?','-','$',"!!","?","w/","!","!!!","w/","'","RT","rt","@","#","/",":)",
":(",":D","^_^","^","...","&","\\",":","?","<",">","$","%","*","`","~","-","_",
"+","=","{","}","[","]","|","\"",",",";",")","(","r/","/u/","*","-"]
punctuation_list.extend(extend_punct_list)
#punctuation_list.remove(".")
months_list.extend(month_list_short)
"""
Given a string normalizes it, i.e. converts it to lowercase and strips it of extra spaces
Params:
--------
s - String which is to be normalized
Returns:
---------
String in the normalized form
"""
def normalize_string(s):
s = s.lower()
s = s.strip()
return s
"""
Given a list of strings normalizes the strings
Params:
-------
string_list - List containing the strings which are to be normalized
Returns:
---------
Returns a list containing the normalized string list
"""
def normalize_string_list(string_list):
normalized_list = []
for sentence in string_list:
normalized_list.append(normalize_string(sentence))
return normalized_list
"""
Given a string and a separator splits up the string in the tokens
Params:
--------
s - string which has to be tokenized
separator - separator based on which the string is to be tokenized
Returns:
---------
A list of words in the sentence based on the separator
"""
def tokenize_string(s,separator):
word_list = list([])
if isinstance(s,basestring):
word_list = s.split(separator)
return word_list
"""
Given a list of sentences tokenizes each sentence in the list
Params:
--------
string_list - List of sentences which have to be tokenized
separator - Separator based on which the sentences have to be tokenized
"""
def tokenize_string_list(string_list,separator):
tokenized_sentence_list = []
for sentence in string_list:
#sentence = sentence.encode("ascii","ignore")
tokenized_sentence_list.append(tokenize_string(sentence,separator))
return tokenized_sentence_list
"""
Given a string containing stopwords removes all the stopwords
Params:
--------
s - String containing the stopwords which are to be removed
Returns:
---------
String sans the stopwords
"""
def remove_stopwords(s):
s = s.lower()
removed_string = ''
words = s.split()
for word in words:
if word not in stopword_list:
removed_string = removed_string + word.strip() + " "
return removed_string.strip()
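# Illustrative example: remove_stopwords("This is a sample sentence") returns
# "sample sentence", since "this", "is" and "a" are in NLTK's English stopword list.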
"""
Given a list of sentences and a filename, writes the sentences to the file
Params:
--------
sentence_list - List of sentences which have to be written to the file
filename - File to which the sentences have to be written
Returns:
---------
Nothing quite just writes the sentences to the file
"""
def write_sentences_to_file(sentence_list,filename):
write_file = open(filename,'w')
for sentence in sentence_list:
write_file.write(encode_ascii(sentence) + '\n')
write_file.flush()
write_file.close()
"""
Removes all the punctuations from a given string
Params:
--------
s - String containing the possible punctuations
Returns:
--------
String without the punctuations (including new lines and tabs)
"""
def remove_punctuations(s):
s = s.lower()
s = s.strip()
for punctuation in punctuation_list:
s = s.replace(punctuation,' ')
return s.strip()
"""
Strips a given string of HTML tags
Params:
--------
s - String from which the HTML tags have to be removed
Returns:
---------
String sans the HTML tags
"""
def remove_html_tags(s):
for tag in html_tag_list:
s = s.replace(tag,' ')
return s
"""
Given a string removes all the digits from it
Params:
-------
s - String from which the digits need to be removed
Returns:
---------
String without occurence of the digits
"""
def remove_digits(s):
for digit in digit_list:
s = s.replace(digit,'')
return s
"""
Given a string removes all occurrences of month names from it
Params:
--------
s - String containing possible month names
Returns:
--------
String without the occurrence of the month names
"""
def remove_months(s):
s = s.lower()
words = s.split()
without_month_list = [word for word in words if word not in months_list]
month_clean_string = ""
for word in without_month_list:
month_clean_string = month_clean_string + word + " "
return month_clean_string.strip()
"""
Checks if a given string contains all ASCII characters
Params:
-------
s - String which is to be checked for ASCII characters
Returns:
--------
True if the string contains all ASCII characters, False otherwise
"""
def is_ascii(s):
if isinstance(s,basestring):
return all(ord(c) < 128 for c in s)
return False
"""
Given a string encodes it in ascii format
Params:
--------
s - String which is to be encoded
Returns:
--------
String encoded in ascii format
"""
def encode_ascii(s):
return s.encode('ascii','ignore')
"""
Stems each word of a given sentence to it's root word using Porters Stemmer
Params:
--------
sentence - String containing the sentence which is to be stemmed
Returns:
---------
Sentence where each word has been stemmed to it's root word
"""
def stem_sentence(sentence):
words = sentence.split()
stemmed_sentence = ""
for word in words:
try:
if is_ascii(word):
stemmed_sentence = stemmed_sentence + ps.stem_word(word) + " "
except:
pass
return stemmed_sentence.strip()
"""
Given a string removes urls from the string
Params:
--------
s - String containing urls which have to be removed
Returns:
--------
String without the occurence of the urls
"""
def remove_url(s):
s = s.lower()
words = s.split()
without_url = ""
for word in words:
if word.count('http:') == 0 and word.count('https:') == 0 and word.count('ftp:') == 0 and word.count('www.') == 0 and word.count('.com') == 0 and word.count('.ly') == 0 and word.count('.st') == 0:
without_url = without_url + word + " "
return without_url.strip()
"""
Given a string removes all the words whose length is less than 3
Params:
--------
s - String from which small words have to be removed.
Returns:
---------
Returns a string without occurence of small words
"""
def remove_small_words(s):
words = s.split()
clean_string = ""
for word in words:
if len(word) >= 3:
clean_string = clean_string + word + " "
return clean_string.strip()
"""
Defines the pipeline for cleaning and preprocessing of text
Params:
--------
s - String containing the text which has to be preprocessed
Returns:
---------
String which has been passed through the preprocessing pipeline
"""
def text_clean_pipeline(s):
s = remove_url(s)
s = remove_punctuations(s)
s = remove_html_tags(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_digits(s)
#s = stem_sentence(s)
s = remove_small_words(s)
return s
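# Illustrative example of the pipeline above (the exact output depends on the
# stopword list and the punctuation/tag tables defined at module level):
#
#   text_clean_pipeline("Check http://example.com <b>NOW</b>, it's 100% great in January!!")
#   # -> roughly "check great": the URL, punctuation, HTML markup, stopwords,
#   #    month name, digits and words shorter than 3 characters are all dropped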
"""
Given a list of sentences processes the list through the pre-preprocessing pipeline and returns the list
Params:
--------
sentence_list - List of sentences which are to be cleaned
Returns:
---------
The cleaned and pre-processed sentence list
"""
def text_clean_pipeline_list(sentence_list):
clean_sentence_list = list([])
for s in sentence_list:
s = remove_digits(s)
s = remove_punctuations(s)
s = remove_html_tags(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_small_words(s)
#s = encode_ascii(s)
#s = remove_url(s)
#s = stem_sentence(s)
clean_sentence_list.append(s)
return clean_sentence_list
"""
Given an Excel filepath and a corresponding sheet name, reads it and converts it into a dataframe
Params:
--------
filename - Filepath containing the location and name of the file
sheetname - Name of the sheet containing the data
Returns:
---------
pandas dataframe containing the data from the excel file
"""
def get_dataframe_from_excel(filename,sheetname):
xl_file = pd.ExcelFile(filename)
data_frame = xl_file.parse(sheetname)
return data_frame
| mit |
bsipocz/astroML | doc/logos/plot_logo.py | 5 | 1286 | """
NASA Sloan Atlas
----------------
This shows some visualizations of the data from the NASA SDSS Atlas
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from astroML.datasets import fetch_nasa_atlas
data = fetch_nasa_atlas()
#------------------------------------------------------------
# plot the RA/DEC in an area-preserving projection
RA = data['RA']
DEC = data['DEC']
# convert coordinates to degrees
RA -= 180
RA *= np.pi / 180
DEC *= np.pi / 180
fig = plt.figure(figsize=(8, 2), facecolor='w')
ax = fig.add_axes([0.56, 0.1, 0.4, 0.8], projection='mollweide')
plt.scatter(RA, DEC, s=1, lw=0, c=data['Z'], cmap=plt.cm.copper)
plt.grid(True)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
font = {'family' : 'neuropol X',
'color' : '#222222',
'weight' : 'normal',
'size' : 135,
}
fig.text(0.5, 0.5, 'astroML', ha='center', va='center',
fontdict=font)
#size=135,
#fontproperties=FontProperties(['neuropol X bold', 'neuropol X']))
plt.savefig('logo.png')
plt.show()
| bsd-2-clause |
adico-somoto/deep-learning | image-classification/helper.py | 29 | 5643 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
    # load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
    # Preprocess and save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_training.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
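# Illustrative usage of the generator above: with 10,000 samples and batch_size=128
# it yields 78 full batches followed by a final batch of 16 samples, e.g.
#
#   for batch_features, batch_labels in batch_features_labels(features, labels, 128):
#       train_on_batch(batch_features, batch_labels)  # hypothetical training step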
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
JoseBlanca/franklin | scripts/stats/hist_from_number_list.py | 1 | 2589 | #!/usr/bin/env python
'Given a column of numbers in a file or stdin it plots a histogram'
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of project.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
from sys import stdin
import matplotlib.pyplot as plt
def main():
''' Main'''
    parser = OptionParser('usage: %prog [-i INFILE] [-m MIN] [-M MAX] [-t BINS]', version='%prog 1.0')
parser.add_option('-i', '--infile', dest='infile',
help='Input file')
parser.add_option('-o', '--outfile', dest='outfile',
help='Output file')
parser.add_option('-M', '--max', dest='max', type="float",
help='Maximun limit')
parser.add_option('-m', '--min', dest='min', type="float",
help='Minimun Limit')
    parser.add_option('-t', '--interval', dest='interval', type='int',
                      help='Number of bins for the histogram')
options = parser.parse_args()[0]
if options.infile is None:
ifhand = stdin
else:
ifname = options.infile
ifhand = open(ifname,'rt')
file_content = ifhand.read()
if options.interval is not None:
interval = options.interval
else:
interval = 20
data_list = []
for line in file_content.split('\n'):
line.strip()
if line.isspace() or line == '':
continue
number = float(line)
data_list.append(number)
if options.max is None:
opt_max = max(data_list)
else:
opt_max = options.max
if options.min is None:
opt_min = min(data_list)
else:
opt_min = options.min
plot_range = opt_min, opt_max
#ploting the figure
fig = plt.figure()
axes = fig.add_subplot(111)
axes.hist(data_list, bins=interval, range=plot_range,
facecolor='green', alpha=0.75)
plt.show()
if __name__ == '__main__':
main()
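# Example invocations (illustrative):
#   cat numbers.txt | python hist_from_number_list.py -t 30
#   python hist_from_number_list.py -i numbers.txt -m 0 -M 100 -t 50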
| agpl-3.0 |
chapmanb/cloudbiolinux | installed_files/ipython_config.py | 15 | 14156 | # Configuration file for ipython.
c = get_config()
c.InteractiveShell.autoindent = True
c.InteractiveShell.colors = 'Linux'
c.InteractiveShell.confirm_exit = False
c.AliasManager.user_aliases = [
('ll', 'ls -l'),
('lt', 'ls -ltr'),
]
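# Example of further startup customisation using options documented below
# (commented out, illustrative only):
# c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import matplotlib.pyplot as plt']
# c.InteractiveShellApp.extensions = ['autoreload']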
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# Provides init_extensions() and init_code() methods, to be called after
# init_shell(), which must be implemented by subclasses.
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet').
# c.TerminalIPythonApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py', start an
# interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHON_DIR.
# c.TerminalIPythonApp.ipython_dir = u'/home/ubuntu/.ipython'
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.1 (r271:86832, Jun 25 2011, 05:09:01) \nType "copyright", "credits" or "license" for more information.\n\nIPython 0.12 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.external.pretty` to compute the format data of the
# object. If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.external.pretty` for details on how to write
# pretty printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
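# Illustrative note (an assumption based on the description above, not part of
# the generated defaults): with greedy completion enabled, ``mylist[0].<TAB>``
# evaluates ``mylist[0]`` in order to offer its attributes, which is why the
# option is described as potentially unsafe.
# c.IPCompleter.greedy = True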
| mit |
jmschrei/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
          info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
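# Illustrative follow-up (not part of the original example): according to the
# scikit-learn Birch documentation, the model fitted with n_clusters=None can
# be given a global clustering step afterwards, without re-reading X, by
# setting n_clusters and calling partial_fit with no arguments:
#     birch_no_global = birch_models[0]
#     birch_no_global.set_params(n_clusters=100)
#     birch_no_global.partial_fit()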
| bsd-3-clause |
AdrienGuille/GrowingNeuralGas | gng.py | 1 | 10351 | # coding: utf-8
import numpy as np
from scipy import spatial
import networkx as nx
import matplotlib.pyplot as plt
from sklearn import decomposition
__authors__ = 'Adrien Guille'
__email__ = 'adrien.guille@univ-lyon2.fr'
'''
Simple implementation of the Growing Neural Gas algorithm, based on:
A Growing Neural Gas Network Learns Topologies. B. Fritzke, Advances in Neural
Information Processing Systems 7, 1995.
'''
class GrowingNeuralGas:
def __init__(self, input_data):
self.network = None
self.data = input_data
self.units_created = 0
plt.style.use('ggplot')
def find_nearest_units(self, observation):
distance = []
for u, attributes in self.network.nodes(data=True):
vector = attributes['vector']
dist = spatial.distance.euclidean(vector, observation)
distance.append((u, dist))
distance.sort(key=lambda x: x[1])
ranking = [u for u, dist in distance]
return ranking
def prune_connections(self, a_max):
nodes_to_remove = []
for u, v, attributes in self.network.edges(data=True):
if attributes['age'] > a_max:
nodes_to_remove.append((u, v))
for u, v in nodes_to_remove:
self.network.remove_edge(u, v)
nodes_to_remove = []
for u in self.network.nodes():
if self.network.degree(u) == 0:
nodes_to_remove.append(u)
for u in nodes_to_remove:
self.network.remove_node(u)
def fit_network(self, e_b, e_n, a_max, l, a, d, passes=1, plot_evolution=False):
# logging variables
accumulated_local_error = []
global_error = []
network_order = []
network_size = []
total_units = []
self.units_created = 0
# 0. start with two units a and b at random position w_a and w_b
w_a = [np.random.uniform(-2, 2) for _ in range(np.shape(self.data)[1])]
w_b = [np.random.uniform(-2, 2) for _ in range(np.shape(self.data)[1])]
self.network = nx.Graph()
self.network.add_node(self.units_created, vector=w_a, error=0)
self.units_created += 1
self.network.add_node(self.units_created, vector=w_b, error=0)
self.units_created += 1
# 1. iterate through the data
sequence = 0
for p in range(passes):
print(' Pass #%d' % (p + 1))
np.random.shuffle(self.data)
steps = 0
for observation in self.data:
# 2. find the nearest unit s_1 and the second nearest unit s_2
nearest_units = self.find_nearest_units(observation)
s_1 = nearest_units[0]
s_2 = nearest_units[1]
# 3. increment the age of all edges emanating from s_1
for u, v, attributes in self.network.edges(data=True, nbunch=[s_1]):
self.network.add_edge(u, v, age=attributes['age']+1)
# 4. add the squared distance between the observation and the nearest unit in input space
self.network.node[s_1]['error'] += spatial.distance.euclidean(observation, self.network.node[s_1]['vector'])**2
                # 5. move s_1 and its direct topological neighbors towards the observation by the fractions
# e_b and e_n, respectively, of the total distance
                update_w_s_1 = e_b * \
(np.subtract(observation,
self.network.node[s_1]['vector']))
self.network.node[s_1]['vector'] = np.add(
self.network.node[s_1]['vector'], update_w_s_1)
for neighbor in self.network.neighbors(s_1):
                    update_w_s_n = e_n * \
(np.subtract(observation,
self.network.node[neighbor]['vector']))
self.network.node[neighbor]['vector'] = np.add(
self.network.node[neighbor]['vector'], update_w_s_n)
# 6. if s_1 and s_2 are connected by an edge, set the age of this edge to zero
# if such an edge doesn't exist, create it
self.network.add_edge(s_1, s_2, age=0)
# 7. remove edges with an age larger than a_max
# if this results in units having no emanating edges, remove them as well
self.prune_connections(a_max)
# 8. if the number of steps so far is an integer multiple of parameter l, insert a new unit
steps += 1
if steps % l == 0:
if plot_evolution:
self.plot_network('visualization/sequence/' + str(sequence) + '.png')
sequence += 1
# 8.a determine the unit q with the maximum accumulated error
q = 0
error_max = 0
for u in self.network.nodes():
if self.network.node[u]['error'] > error_max:
error_max = self.network.node[u]['error']
q = u
# 8.b insert a new unit r halfway between q and its neighbor f with the largest error variable
f = -1
largest_error = -1
for u in self.network.neighbors(q):
if self.network.node[u]['error'] > largest_error:
largest_error = self.network.node[u]['error']
f = u
w_r = 0.5 * (np.add(self.network.node[q]['vector'], self.network.node[f]['vector']))
r = self.units_created
self.units_created += 1
# 8.c insert edges connecting the new unit r with q and f
# remove the original edge between q and f
self.network.add_node(r, vector=w_r, error=0)
self.network.add_edge(r, q, age=0)
self.network.add_edge(r, f, age=0)
self.network.remove_edge(q, f)
# 8.d decrease the error variables of q and f by multiplying them with a
# initialize the error variable of r with the new value of the error variable of q
self.network.node[q]['error'] *= a
self.network.node[f]['error'] *= a
self.network.node[r]['error'] = self.network.node[q]['error']
# 9. decrease all error variables by multiplying them with a constant d
error = 0
for u in self.network.nodes():
error += self.network.node[u]['error']
accumulated_local_error.append(error)
network_order.append(self.network.order())
network_size.append(self.network.size())
total_units.append(self.units_created)
for u in self.network.nodes():
self.network.node[u]['error'] *= d
if self.network.degree(nbunch=[u]) == 0:
print(u)
global_error.append(self.compute_global_error())
plt.clf()
plt.title('Accumulated local error')
plt.xlabel('iterations')
plt.plot(range(len(accumulated_local_error)), accumulated_local_error)
plt.savefig('visualization/accumulated_local_error.png')
plt.clf()
plt.title('Global error')
plt.xlabel('passes')
plt.plot(range(len(global_error)), global_error)
plt.savefig('visualization/global_error.png')
plt.clf()
plt.title('Neural network properties')
plt.plot(range(len(network_order)), network_order, label='Network order')
plt.plot(range(len(network_size)), network_size, label='Network size')
plt.legend()
plt.savefig('visualization/network_properties.png')
def plot_network(self, file_path):
plt.clf()
plt.scatter(self.data[:, 0], self.data[:, 1])
node_pos = {}
for u in self.network.nodes():
vector = self.network.node[u]['vector']
node_pos[u] = (vector[0], vector[1])
nx.draw(self.network, pos=node_pos)
plt.draw()
plt.savefig(file_path)
def number_of_clusters(self):
return nx.number_connected_components(self.network)
def cluster_data(self):
unit_to_cluster = np.zeros(self.units_created)
cluster = 0
for c in nx.connected_components(self.network):
for unit in c:
unit_to_cluster[unit] = cluster
cluster += 1
clustered_data = []
for observation in self.data:
nearest_units = self.find_nearest_units(observation)
s = nearest_units[0]
clustered_data.append((observation, unit_to_cluster[s]))
return clustered_data
def reduce_dimension(self, clustered_data):
transformed_clustered_data = []
svd = decomposition.PCA(n_components=2)
transformed_observations = svd.fit_transform(self.data)
for i in range(len(clustered_data)):
transformed_clustered_data.append((transformed_observations[i], clustered_data[i][1]))
return transformed_clustered_data
def plot_clusters(self, clustered_data):
number_of_clusters = nx.number_connected_components(self.network)
plt.clf()
plt.title('Cluster affectation')
color = ['r', 'b', 'g', 'k', 'm', 'r', 'b', 'g', 'k', 'm']
for i in range(number_of_clusters):
observations = [observation for observation, s in clustered_data if s == i]
if len(observations) > 0:
observations = np.array(observations)
plt.scatter(observations[:, 0], observations[:, 1], color=color[i], label='cluster #'+str(i))
plt.legend()
plt.savefig('visualization/clusters.png')
def compute_global_error(self):
global_error = 0
for observation in self.data:
nearest_units = self.find_nearest_units(observation)
s_1 = nearest_units[0]
global_error += spatial.distance.euclidean(observation, self.network.node[s_1]['vector'])**2
return global_error
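# Illustrative usage sketch (not part of the original module). The parameter
# values are assumptions chosen for demonstration, and fit_network() saves its
# summary plots under a local 'visualization/' directory, which is assumed to
# exist.
if __name__ == '__main__':
    from sklearn import datasets as sk_datasets
    data, _ = sk_datasets.make_moons(n_samples=500, noise=0.05)
    gng = GrowingNeuralGas(data)
    gng.fit_network(e_b=0.1, e_n=0.006, a_max=10, l=200, a=0.5, d=0.995,
                    passes=5, plot_evolution=False)
    print('Found %d clusters' % gng.number_of_clusters())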
| mit |
camallen/aggregation | experimental/condor/cluster.py | 2 | 2726 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import urllib
import matplotlib.cbook as cbook
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
import agglomerativeClustering
client = pymongo.MongoClient()
db = client['condor_2014-09-19']
collection = db["condor_classifications"]
i = 0
condor_pts = {}
condors_per_user = {}
classification_count = {}
check1 = 5
condors_at_1 = {}
check2 = 10
condors_at_2 = {}
total = 0
maxDiff = {}
check = [{} for i in range(0,11)]
for r in collection.find({"$and" : [{"tutorial":False}, {"subjects": {"$ne": []}} ]}):
#zooniverse_id = r["zooniverse_id"]
user_ip = r["user_ip"]
zooniverse_id = r["subjects"][0]["zooniverse_id"]
if not(zooniverse_id in condor_pts):
condor_pts[zooniverse_id] = set()
#condor_user_id[zooniverse_id] = []
classification_count[zooniverse_id] = 0
condors_per_user[zooniverse_id] = []
classification_count[zooniverse_id] += 1
condor_count = 0
if "marks" in r["annotations"][-1]:
markings = r["annotations"][-1].values()[0]
for marking_index in markings:
marking = markings[marking_index]
try:
if marking["animal"] == "condor":
scale = 1.875
x = scale*float(marking["x"])
y = scale*float(marking["y"])
condor_pts[zooniverse_id].add(((x,y),user_ip))
#condor_user_id[zooniverse_id].append(user_ip)
condor_count += 1
except KeyError:
continue
condors_per_user[zooniverse_id].append(condor_count)
#if (classification_count[zooniverse_id] == 5) and (condor_pts[zooniverse_id] != []):
if (np.mean(condors_per_user[zooniverse_id]) > 2) and (len(condors_per_user[zooniverse_id]) > 4):
if condor_pts[zooniverse_id] != set([]):
object_id = str(r["subjects"][0]["id"])
url = r["subjects"][0]["location"]["standard"]
cluster_center = agglomerativeClustering.agglomerativeClustering(condor_pts[zooniverse_id])
break
if not(os.path.isfile("/home/greg/Databases/condors/images/"+object_id+".JPG")):
urllib.urlretrieve (url, "/home/greg/Databases/condors/images/"+object_id+".JPG")
image_file = cbook.get_sample_data("/home/greg/Databases/condors/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
#plt.show()
#
if cluster_center != []:
x,y = zip(*cluster_center)
plt.plot(x,y,'.',color='blue')
plt.show()
| apache-2.0 |
cxxgtxy/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
q1ang/tushare | tushare/datayes/future.py | 17 | 1740 | # -*- coding:utf-8 -*-
"""
通联数据
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Future():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Futu(self, exchangeCD='', secID='', ticker='', contractObject='', field=''):
"""
        Fetch the basic contract attributes for futures listed on China's four
        major futures exchanges, including contract name, contract code,
        contract type, underlying asset, quotation unit, minimum price tick,
        daily price limit, trading currency, contract multiplier, trading
        margin, listing date, last trading day, delivery date, delivery
        method, trading fee, delivery fee, listing base price and contract
        status.
"""
code, result = self.client.getData(vs.FUTU%(exchangeCD, secID, ticker, contractObject, field))
return _ret_data(code, result)
def FutuConvf(self, secID='', ticker='', field=''):
"""
        Fetch conversion factor information for treasury bond futures,
        including the names and trading codes of the deliverable treasury
        bonds and their conversion factors.
"""
code, result = self.client.getData(vs.FUTUCONVF%(secID, ticker, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
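# Illustrative usage sketch (not part of the original module). It assumes that
# a valid DataYes token has already been stored via tushare.util.upass, and
# that the API accepts an unfiltered query; both are assumptions.
if __name__ == '__main__':
    ft = Future()
    futures_df = ft.Futu()  # no filters; _ret_data returns None on API errors
    if futures_df is not None:
        print(futures_df.head())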
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/utils/tests/test_utils.py | 47 | 9089 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
    # Issue #6581: n_samples can be larger than the input when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
    v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert indexable data structures to
    # numpy arrays with float dtypes, and instead lets them pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| mit |
dhruv13J/scikit-learn | sklearn/tree/tests/test_tree.py | 72 | 47440 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
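    # Only the first feature varies, and each of its two values covers one
    # sample of class 0 and one of class 1, so the best achievable tree has
    # depth 1 and predicts probability 0.5 for both classes in every leaf.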
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
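# For instance, on a 64-bit platform the first fit above requests 2 ** 65 leaf
# nodes, more than the address space can represent (hence OverflowError),
# while 2 ** 63 - 1 nodes is representable but far too large to actually
# allocate (hence MemoryError).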
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Reduce testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a CSR and a CSC matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
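    # Total weight is 1.0, so min_weight_fraction_leaf=0.4 requires each leaf
    # to carry at least 0.4 of the weight; isolating the single class-1 sample
    # (weight 0.2) is then forbidden and the tree stays a single root node.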
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/linear_model/least_angle.py | 61 | 54324 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (neither will they when using method
        'lar'). Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is Fortran
                # contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
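            # After the solve, L[n_active, :n_active] holds w with L w = Xa' x_j,
            # so v = ||w||^2 and the new diagonal entry below is
            # sqrt(||x_j||^2 - ||w||^2), floored at eps for numerical stability.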
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
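        # gamma_ below is the LARS step length along the equiangular direction:
        # the smallest positive step at which some inactive variable's
        # correlation catches up with the (shrinking) common correlation C.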
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
            # handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
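# Illustrative usage sketch for lars_path (kept as a comment; `X` and `y` are
# assumed to be a dense design matrix and target vector, not defined here):
#
#     alphas, active, coefs = lars_path(X, y, method='lasso', alpha_min=0.0)
#     # coefs[:, -1] holds the coefficients at the smallest alpha reached.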
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
        Xy : array-like, shape (n_features,) or (n_features, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
        parameter vector (w in the problem formulation)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
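        # Each fold's residues are interpolated onto this common alpha grid so
        # that squared prediction errors can be averaged within a fold and
        # compared across folds at the same alpha values.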
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
        parameter vector (w in the problem formulation)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain the data well while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
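        # Criterion (up to additive constants): n_samples * log(MSE) + K * df,
        # with K = 2 for AIC and K = log(n_samples) for BIC, df being the
        # number of non-zero coefficients at each point of the path.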
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
cmap/cmapPy | cmapPy/pandasGEXpress/tests/python2_tests/test_write_gctx.py | 1 | 11644 | import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import unittest
import h5py
import os
import numpy
import cmapPy.pandasGEXpress.parse_gctx as parse_gctx
import cmapPy.pandasGEXpress.write_gctx as write_gctx
import cmapPy.pandasGEXpress.mini_gctoo_for_testing as mini_gctoo_for_testing
__author__ = "Oana Enache"
__email__ = "oana@broadinstitute.org"
FUNCTIONAL_TESTS_PATH = "cmapPy/pandasGEXpress/tests/functional_tests/"
# instantiate logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestWriteGctx(unittest.TestCase):
def test_add_gctx_to_out_name(self):
name1 = "my_cool_file"
name2 = "my_other_cool_file.gctx"
# case 1: out file name doesn't end in gctx
out_name1 = write_gctx.add_gctx_to_out_name(name1)
self.assertTrue(out_name1 == name1 + ".gctx",
("out name should be my_cool_file.gctx, not {}").format(out_name1))
# case 2: out file name does end in gctx
out_name2 = write_gctx.add_gctx_to_out_name(name2)
self.assertTrue(out_name2 == name2,
("out name should be my_other_cool_file.gctx, not {}").format(out_name2))
def test_write_src(self):
# case 1: gctoo obj doesn't have src
mini1 = mini_gctoo_for_testing.make()
mini1.src = None
write_gctx.write(mini1, "no_src_example")
hdf5_file = h5py.File("no_src_example.gctx")
hdf5_src1 = hdf5_file.attrs[write_gctx.src_attr]
hdf5_file.close()
self.assertEqual(hdf5_src1, "no_src_example.gctx")
os.remove("no_src_example.gctx")
# case 2: gctoo obj does have src
mini2 = mini_gctoo_for_testing.make()
write_gctx.write(mini2, "with_src_example.gctx")
hdf5_file = h5py.File("with_src_example.gctx")
hdf5_src2 = hdf5_file.attrs[write_gctx.src_attr]
hdf5_file.close()
self.assertEqual(hdf5_src2, "mini_gctoo.gctx")
os.remove("with_src_example.gctx")
def test_write_version(self):
# TODO @oana refactor this test so it just calls the write_version method
# case 1: gctoo obj doesn't have version
mini1 = mini_gctoo_for_testing.make()
mini1.version = None
fn = "no_version_provided_example.gctx"
write_gctx.write(mini1, fn)
hdf5_file = h5py.File(fn)
hdf5_v1 = hdf5_file.attrs[write_gctx.version_attr]
hdf5_file.close()
self.assertEqual(hdf5_v1, write_gctx.version_number)
os.remove(fn)
# case 2: gctoo obj does have version, but it is not used when writing
mini2 = mini_gctoo_for_testing.make()
mini2.version = "MY_VERSION"
fn = "with_version_provided_example.gctx"
write_gctx.write(mini2, fn)
hdf5_file = h5py.File(fn)
hdf5_v2 = hdf5_file.attrs[write_gctx.version_attr]
hdf5_file.close()
self.assertEqual(hdf5_v2, write_gctx.version_number)
os.remove(fn)
def test_calculate_elem_per_kb(self):
max_chunk_kb = 1024
# dtype is numpy.float32
dtype1 = numpy.float32
correct_elem_per_kb1 = 256
elem_per_kb1 = write_gctx.calculate_elem_per_kb(max_chunk_kb, dtype1)
self.assertEqual(elem_per_kb1, correct_elem_per_kb1)
# dtype is numpy.float64
dtype2 = numpy.float64
correct_elem_per_kb2 = 128
elem_per_kb2 = write_gctx.calculate_elem_per_kb(max_chunk_kb, dtype2)
self.assertEqual(elem_per_kb2, correct_elem_per_kb2)
        # dtype is something else
dtype3 = numpy.int
with self.assertRaises(Exception) as context:
write_gctx.calculate_elem_per_kb(max_chunk_kb, dtype3)
self.assertTrue("only numpy.float32 and numpy.float64 are currently supported" in str(context.exception))
def test_set_data_matrix_chunk_size(self):
max_chunk_kb = 1024
elem_per_kb = 256
sample_data_shape = (978, 1000)
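        # at most 1024 KB * 256 elements/KB = 262,144 elements fit in a chunk;
        # 262,144 // 978 rows = 268 columns, hence the expected chunk shape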
expected_chunk_size = (978, 268)
calculated_chunk_size = write_gctx.set_data_matrix_chunk_size(sample_data_shape, max_chunk_kb, elem_per_kb)
self.assertEqual(calculated_chunk_size, expected_chunk_size)
def test_write_metadata(self):
"""
CASE 1:
- write metadata (has '-666') to file, do not convert -666
- parse in written metadata, don't convert -666
"""
mini_gctoo = mini_gctoo_for_testing.make(convert_neg_666=False)
hdf5_writer = h5py.File(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx", "w")
write_gctx.write_metadata(hdf5_writer, "row", mini_gctoo.row_metadata_df, False, 6)
write_gctx.write_metadata(hdf5_writer, "col", mini_gctoo.col_metadata_df, False, 6)
hdf5_writer.close()
logger.debug("Wrote mini_gctoo_metadata.gctx to {}".format(
os.path.join(FUNCTIONAL_TESTS_PATH, "mini_gctoo_metadata.gctx")))
# read in written metadata, then close and delete file
mini_gctoo_col_metadata = parse_gctx.get_column_metadata(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx",
convert_neg_666=False)
mini_gctoo_row_metadata = parse_gctx.get_row_metadata(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx",
convert_neg_666=False)
os.remove(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx")
# check row metadata
self.assertTrue(set(mini_gctoo.row_metadata_df.columns) == set(mini_gctoo_row_metadata.columns),
"Mismatch between expected row metadata columns {} and column values written to file: {}".format(
mini_gctoo.row_metadata_df.columns, mini_gctoo_row_metadata.columns))
        self.assertTrue(set(mini_gctoo.row_metadata_df.index) == set(mini_gctoo_row_metadata.index),
                        "Mismatch between expected row metadata index {} and index values written to file: {}".format(
mini_gctoo.row_metadata_df.index, mini_gctoo_row_metadata.index))
for c in list(mini_gctoo.row_metadata_df.columns):
logger.debug("C1: For column name: {}".format(c))
logger.debug("C1: populated values: {}".format(set(mini_gctoo_row_metadata[c])))
logger.debug("C1: mini_gctoo values: {}".format(set(mini_gctoo.row_metadata_df[c])))
self.assertTrue(set(mini_gctoo.row_metadata_df[c]) == set(mini_gctoo_row_metadata[c]),
"Values in column {} differ between expected metadata and written row metadata: {} vs {}".format(
c, set(mini_gctoo.row_metadata_df[c]), set(mini_gctoo_row_metadata[c])))
# check col metadata
self.assertTrue(set(mini_gctoo.col_metadata_df.columns) == set(mini_gctoo_col_metadata.columns),
"Mismatch between expected col metadata columns {} and column values written to file: {}".format(
mini_gctoo.col_metadata_df.columns, mini_gctoo_col_metadata.columns))
        self.assertTrue(set(mini_gctoo.col_metadata_df.index) == set(mini_gctoo_col_metadata.index),
                        "Mismatch between expected col metadata index {} and index values written to file: {}".format(
mini_gctoo.col_metadata_df.index, mini_gctoo_col_metadata.index))
for c in list(mini_gctoo.col_metadata_df.columns):
self.assertTrue(set(mini_gctoo.col_metadata_df[c]) == set(mini_gctoo_col_metadata[c]),
"Values in column {} differ between expected metadata and written col metadata!".format(c))
"""
CASE 2:
- write metadata (has NaN, not '-666') to file, do convert NaN back to '-666'
- parse in written metadata, don't convert -666
"""
# first convert mini_gctoo's row & col metadata dfs -666s to NaN
converted_row_metadata = mini_gctoo.row_metadata_df.replace([-666, "-666", -666.0],
[numpy.nan, numpy.nan, numpy.nan])
logger.debug("First row of converted_row_metadata: {}".format(converted_row_metadata.iloc[0]))
converted_col_metadata = mini_gctoo.col_metadata_df.replace([-666, "-666", -666.0],
[numpy.nan, numpy.nan, numpy.nan])
# write row and col metadata fields from mini_gctoo_for_testing instance to file
# Note this time does convert back to -666
hdf5_writer = h5py.File(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx", "w")
write_gctx.write_metadata(hdf5_writer, "row", converted_row_metadata, True, 6)
write_gctx.write_metadata(hdf5_writer, "col", converted_col_metadata, True, 6)
hdf5_writer.close()
# read in written metadata, then close and delete file
mini_gctoo_col_metadata = parse_gctx.get_column_metadata(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx",
convert_neg_666=False)
mini_gctoo_row_metadata = parse_gctx.get_row_metadata(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx",
convert_neg_666=False)
os.remove(FUNCTIONAL_TESTS_PATH + "/mini_gctoo_metadata.gctx")
# check row metadata
self.assertTrue(set(mini_gctoo.row_metadata_df.columns) == set(mini_gctoo_row_metadata.columns),
"Mismatch between expected row metadata columns {} and column values written to file: {}".format(
mini_gctoo.row_metadata_df.columns, mini_gctoo_row_metadata.columns))
        self.assertTrue(set(mini_gctoo.row_metadata_df.index) == set(mini_gctoo_row_metadata.index),
                        "Mismatch between expected row metadata index {} and index values written to file: {}".format(
mini_gctoo.row_metadata_df.index, mini_gctoo_row_metadata.index))
for c in list(mini_gctoo.row_metadata_df.columns):
logger.debug("C2: For column name: {}".format(c))
logger.debug("C2: populated values: {}".format(set(mini_gctoo_row_metadata[c])))
logger.debug("C2: mini_gctoo values: {}".format(set(mini_gctoo.row_metadata_df[c])))
self.assertTrue(set(mini_gctoo.row_metadata_df[c]) == set(mini_gctoo_row_metadata[c]),
"Values in column {} differ between expected metadata and written row metadata!".format(c))
# check col metadata
self.assertTrue(set(mini_gctoo.col_metadata_df.columns) == set(mini_gctoo_col_metadata.columns),
"Mismatch between expected col metadata columns {} and column values written to file: {}".format(
mini_gctoo.col_metadata_df.columns, mini_gctoo_col_metadata.columns))
        self.assertTrue(set(mini_gctoo.col_metadata_df.index) == set(mini_gctoo_col_metadata.index),
                        "Mismatch between expected col metadata index {} and index values written to file: {}".format(
mini_gctoo.col_metadata_df.index, mini_gctoo_col_metadata.index))
for c in list(mini_gctoo.col_metadata_df.columns):
self.assertTrue(set(mini_gctoo.col_metadata_df[c]) == set(mini_gctoo_col_metadata[c]),
"Values in column {} differ between expected metadata and written col metadata!".format(c))
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/semi_supervised/label_propagation.py | 15 | 15050 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, however, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
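            # for the 'knn' kernel, _get_kernel returns neighbor indices, so
            # each point's class distribution is the sum of its neighbors'
            # label distributions (normalized below)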
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
paninski-lab/yass | src/yass/pipeline_nn_training.py | 1 | 6973 | """
Built-in pipeline
"""
import time
import logging
import logging.config
import shutil
import os
import matplotlib
matplotlib.use('Agg')
# suppress PCA unpickle userwarning
# Cat: TODO: this is dangerous, may wish to fix the problem in cluster.py
# import warnings
# warnings.filterwarnings("ignore", category=UserWarning)
try:
# py3
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import numpy as np
import yaml
import yass
from yass import set_config
from yass import read_config
from yass.config import Config
from yass import (preprocess, detect, cluster, postprocess, augment)
from yass.neuralnetwork import Detect, Denoise
from yass.util import (load_yaml, save_metadata, load_logging_config_file,
human_readable_time)
def run(config, logger_level='INFO', clean=False, output_dir='tmp/'):
"""Run YASS built-in pipeline
Parameters
----------
config: str or mapping (such as dictionary)
Path to YASS configuration file or mapping object
logger_level: str
Logger level
clean: bool, optional
Delete CONFIG.data.root_folder/output_dir/ before running
output_dir: str, optional
Output directory (if relative, it makes it relative to
CONFIG.data.root_folder) to store the output data, defaults to tmp/.
If absolute, it leaves it as it is.
Notes
-----
    Running the preprocessor will generate the following files in
CONFIG.data.root_folder/output_directory/:
* ``config.yaml`` - Copy of the configuration file
* ``metadata.yaml`` - Experiment metadata
* ``filtered.bin`` - Filtered recordings (from preprocess)
* ``filtered.yaml`` - Filtered recordings metadata (from preprocess)
    * ``standardized.bin`` - Standardized recordings (from preprocess)
    * ``standardized.yaml`` - Standardized recordings metadata (from preprocess)
* ``whitening.npy`` - Whitening filter (from preprocess)
Returns
-------
numpy.ndarray
Spike train
"""
# load yass configuration parameters
CONFIG = Config.from_yaml(config)
#CONFIG._data['cluster']['min_fr'] = 0.5
CONFIG._data['cluster']['knn_triage'] = 0.2
CONFIG._data['neuralnetwork']['apply_nn'] = False
CONFIG._data['detect']['threshold'] = 4
#CONFIG._data['clean_up']['min_ptp'] = 5
if CONFIG._data['neuralnetwork']['training']['spike_size_ms'] is not None:
CONFIG._data['recordings']['spike_size_ms'] = CONFIG._data['neuralnetwork']['training']['spike_size_ms']
CONFIG._data['neuralnetwork']['apply_nn'] = False
set_config(CONFIG._data, output_dir)
CONFIG = read_config()
TMP_FOLDER = CONFIG.path_to_output_directory
# remove tmp folder if needed
if os.path.exists(TMP_FOLDER) and clean:
shutil.rmtree(TMP_FOLDER)
# create TMP_FOLDER if needed
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
# load logging config file
logging_config = load_logging_config_file()
logging_config['handlers']['file']['filename'] = os.path.join(
TMP_FOLDER,'yass.log')
logging_config['root']['level'] = logger_level
# configure logging
logging.config.dictConfig(logging_config)
# instantiate logger
logger = logging.getLogger(__name__)
# print yass version
logger.info('YASS version: %s', yass.__version__)
''' **********************************************
******** SET ENVIRONMENT VARIABLES ***********
**********************************************
'''
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["GIO_EXTRA_MODULES"] = "/usr/lib/x86_64-linux-gnu/gio/modules/"
# TODO: if input spike train is None, run yass with threshold detector
#if fname_spike_train is None:
# logger.info('Not available yet. You must input spike train')
# return
''' **********************************************
************** PREPROCESS ********************
**********************************************
'''
# preprocess
start = time.time()
(standardized_path,
standardized_dtype) = preprocess.run(
os.path.join(TMP_FOLDER, 'preprocess'))
TMP_FOLDER = os.path.join(TMP_FOLDER, 'nn_train')
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
if CONFIG.neuralnetwork.training.input_spike_train_filname is None:
# run on 10 minutes of data
rec_len = np.min((CONFIG.rec_len/CONFIG.recordings.sampling_rate, 600))
# detect
logger.info('DETECTION')
spike_index_path = detect.run(
standardized_path,
standardized_dtype,
os.path.join(TMP_FOLDER, 'detect'),
run_chunk_sec=[0, rec_len])
logger.info('CLUSTERING')
# cluster
raw_data = True
full_run = False
fname_templates, fname_spike_train = cluster.run(
os.path.join(TMP_FOLDER, 'cluster'),
standardized_path,
standardized_dtype,
fname_spike_index=spike_index_path,
raw_data=True,
full_run=True)
methods = ['off_center', 'low_ptp', 'duplicate', 'high_mad', 'low_ptp']
(_, fname_spike_train, _, _, _) = postprocess.run(
methods,
os.path.join(TMP_FOLDER,
'cluster_post_process'),
standardized_path,
standardized_dtype,
fname_templates,
fname_spike_train)
else:
# if there is an input spike train, use it
fname_spike_train = CONFIG.neuralnetwork.training.input_spike_train_filname
# Get training data maker
DetectTD, DenoTD = augment.run(
standardized_path,
standardized_dtype,
fname_spike_train,
os.path.join(TMP_FOLDER, 'augment'))
# Train Detector
detector = Detect(CONFIG.neuralnetwork.detect.n_filters,
CONFIG.spike_size_nn,
CONFIG.channel_index,
CONFIG).cuda()
fname_detect = os.path.join(TMP_FOLDER, 'detect.pt')
detector.train(fname_detect, DetectTD)
# Train Denoiser
denoiser = Denoise(CONFIG.neuralnetwork.denoise.n_filters,
CONFIG.neuralnetwork.denoise.filter_sizes,
CONFIG.spike_size_nn,
CONFIG).cuda()
fname_denoise = os.path.join(TMP_FOLDER, 'denoise.pt')
denoiser.train(fname_denoise, DenoTD)
output_folder = os.path.join(CONFIG.path_to_output_directory, 'output')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
shutil.copyfile(fname_detect, os.path.join(output_folder, 'detect.pt'))
shutil.copyfile(fname_denoise, os.path.join(output_folder, 'denoise.pt'))
| apache-2.0 |
collbb/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
    # correct the first range: its 'Under ...' bound is exclusive, so subtract
    # 1 to match the inclusive upper bounds of the other bins
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
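    # (index 41 is the last of the 42 income bins in hinc06.csv, so df[2][41]
    # is the total count)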
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
jmausolf/Python_Tutorials | Machine_Learning/run.py | 1 | 1446 | import sys, argparse, textwrap
sys.path.append('sklearn_magicloops')
from magicloops import *
############################################################
# 1. USER INPUT: Define Your Data, Outcome, and Features
############################################################
#Define Data
dataset = 'data/gss2014.csv'
outcome = 'partyid_str_rep'
features = ['age', 'sex', 'race', 'educ', 'rincome']
############################################################
# 2. RUN THE MODELS
############################################################
# Save Changes and Open Terminal
# In terminal:
# python run.py #Runs for defined features
# python run.py -a True #Runs all possible features
# Help:
# python run.py -h
############################################################
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Prepare input file',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--all_features', default=False, type=bool,
help=textwrap.dedent("""\
Option to run magic loop for all features in dataset
except for the outcome variable.
@ False | Runs only specified features (defined in run.py)
@ True | Runs all features except outcome (defined in run.py)
"""
))
args = parser.parse_args()
main(dataset, outcome, features, args.all_features)
| mit |
ishanic/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark all points beyond the first 30 as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
reinvantveer/Topology-Learning | model/building_convnet_fixed.py | 1 | 4922 | """
This script executes the task of estimating the building type, based solely on the geometry for that building.
The data for this script can be found at http://hdl.handle.net/10411/GYPPBR.
"""
import os
import socket
import sys
from datetime import datetime, timedelta
from pathlib import Path
from time import time
from urllib.request import urlretrieve
import numpy as np
from keras import Input
from keras.callbacks import TensorBoard
from keras.engine import Model
from keras.layers import Dense, Conv1D, GlobalAveragePooling1D, Dropout
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '2.0.3'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + SCRIPT_VERSION + ' ' + TIMESTAMP
DATA_FOLDER = '../files/buildings/'
TRAIN_DATA_FILE = 'buildings_train_v7.npz'
TEST_DATA_FILE = 'buildings_test_v7.npz'
TRAIN_DATA_URL = 'https://dataverse.nl/api/access/datafile/11381'
TEST_DATA_URL = 'https://dataverse.nl/api/access/datafile/11380'
SCRIPT_START = time()
# Hyperparameters
hp = {
'BATCH_SIZE': int(os.getenv('BATCH_SIZE', 32)),
'TRAIN_VALIDATE_SPLIT': float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1)),
'REPEAT_DEEP_ARCH': int(os.getenv('REPEAT_DEEP_ARCH', 0)),
'DENSE_SIZE': int(os.getenv('DENSE_SIZE', 32)),
'EPOCHS': int(os.getenv('EPOCHS', 200)),
'LEARNING_RATE': float(os.getenv('LEARNING_RATE', 1e-4)),
'DROPOUT': float(os.getenv('DROPOUT', 0.0)),
'GEOM_SCALE': float(os.getenv("GEOM_SCALE", 0)), # If no default or 0: overridden when data is known
}
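# All hyperparameters can be overridden through environment variables, e.g.
#     EPOCHS=50 DROPOUT=0.1 python building_convnet_fixed.py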
OPTIMIZER = Adam(lr=hp['LEARNING_RATE'])
# Load training data
path = Path(DATA_FOLDER + TRAIN_DATA_FILE)
if not path.exists():
print("Retrieving training data from web...")
urlretrieve(TRAIN_DATA_URL, DATA_FOLDER + TRAIN_DATA_FILE)
train_loaded = np.load(DATA_FOLDER + TRAIN_DATA_FILE)
train_geoms = train_loaded['fixed_size_geoms']
train_labels = train_loaded['building_type']
# Determine final test mode or standard
if len(sys.argv) > 1 and sys.argv[1] in ['-t', '--test']:
print('Training in final test mode')
path = Path(DATA_FOLDER + TEST_DATA_FILE)
if not path.exists():
print("Retrieving test data from web...")
urlretrieve(TEST_DATA_URL, DATA_FOLDER + TEST_DATA_FILE)
test_loaded = np.load(DATA_FOLDER + TEST_DATA_FILE)
test_geoms = test_loaded['fixed_size_geoms']
test_labels = test_loaded['building_type']
else:
print('Training in standard training mode')
# Split the training data in random seen/unseen sets
train_geoms, test_geoms, train_labels, test_labels = train_test_split(train_geoms, train_labels, test_size=0.1)
# Normalize
geom_scale = hp['GEOM_SCALE'] or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, geom_scale)
test_geoms = geom_scaler.transform(test_geoms, geom_scale) # re-use variance from training
# Map types to one-hot vectors
# noinspection PyUnresolvedReferences
train_targets = np.zeros((len(train_labels), train_labels.max() + 1))
for index, building_type in enumerate(train_labels):
train_targets[index, building_type] = 1
# Shape determination
geom_max_points, geom_vector_len = train_geoms.shape[1:]
output_size = train_targets.shape[-1]
# Build model
inputs = Input(shape=(geom_max_points, geom_vector_len))
model = Conv1D(filters=32, kernel_size=(5,), activation='relu')(inputs)
model = Conv1D(filters=48, kernel_size=(5,), activation='relu', strides=2)(model)
model = Conv1D(filters=64, kernel_size=(5,), activation='relu', strides=2)(model)
model = GlobalAveragePooling1D()(model)
model = Dense(hp['DENSE_SIZE'], activation='relu')(model)
model = Dropout(hp['DROPOUT'])(model)
model = Dense(output_size, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
    optimizer=OPTIMIZER)
model.summary()
# Callbacks
callbacks = [TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False)]
history = model.fit(
x=train_geoms,
y=train_targets,
epochs=hp['EPOCHS'],
batch_size=hp['BATCH_SIZE'],
validation_split=hp['TRAIN_VALIDATE_SPLIT'],
callbacks=callbacks).history
# Run on unseen test data
test_pred = [np.argmax(prediction) for prediction in model.predict(test_geoms)]
accuracy = accuracy_score(test_labels, test_pred)
runtime = time() - SCRIPT_START
message = 'on {} completed with accuracy of \n{:f} \nin {} in {} epochs\n'.format(
socket.gethostname(), accuracy, timedelta(seconds=runtime), len(history['val_loss']))
for key, value in sorted(hp.items()):
message += '{}: {}\t'.format(key, value)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully with', message)
| mit |
PyRsw/PyRsw | examples/example_1D_BickleyJet.py | 1 | 2832 | import numpy as np
import matplotlib.pyplot as plt
import sys
# Add the PyRsw tools to the path
# At the moment it is given explicitely.
# In the future, it could also be added to the
# pythonpath environment variable
sys.path.append('../src')
import Steppers as Step
import Fluxes as Flux
from PyRsw import Simulation
from constants import minute, hour, day
sim = Simulation() # Create a simulation object
sim.run_name = '1D Bickley Jet'
# Geometry and Model Equations
sim.geomx = 'periodic' # Geometry Types: 'periodic' or 'walls'
sim.geomy = 'walls'
sim.stepper = Step.AB3 # Time-stepping algorithm: Euler, AB2, RK4
sim.method = 'Spectral' # Numerical method: 'Spectral'
sim.dynamics = 'Nonlinear' # Dynamics: 'Nonlinear' or 'Linear'
sim.flux_method = Flux.spectral_sw # Flux method: spectral_sw is only option currently
# Specify paramters
sim.Lx = 200e3 # Domain extent (m)
sim.Ly = 200e3 # Domain extent (m)
sim.Nx = 1 # Grid points in x
sim.Ny = 128 # Grid points in y
sim.Nz = 1 # Number of layers
sim.g = 9.81 # Gravity (m/sec^2)
sim.f0 = 1.e-4 # Coriolis (1/sec)
sim.beta = 0e-10 # Coriolis beta (1/m/sec)
sim.cfl = 0.1 # CFL coefficient (m)
sim.Hs = [100.] # Vector of mean layer depths (m)
sim.rho = [1025.] # Vector of layer densities (kg/m^3)
sim.end_time = 14*24.*hour # End Time (sec)
# Parallel? Only applies to the FFTWs
sim.num_threads = 4
# Plotting parameters
sim.plott = 30.*minute # Period of plots
sim.animate = 'Save' # 'Save' to create video frames,
# 'Anim' to animate,
# 'None' otherwise
sim.plot_vars = ['h']
sim.ylims=[[-0.1,0.1]]
#sim.plot_vars = ['vort','div']
#sim.clims = [ [-0.8, 0.8],[-0.1, 0.1]]
# Output parameters
sim.output = False # True or False
sim.savet = 1.*hour # Time between saves
# Diagnostics parameters
sim.diagt = 2.*minute # Time for output
sim.diagnose = False # True or False
# Initialize the grid and zero solutions
sim.initialize()
for ii in range(sim.Nz): # Set mean depths
sim.soln.h[:,:,ii] = sim.Hs[ii]
# Bickley Jet initial conditions
# First we define the jet
Ljet = 10e3 # Jet width
amp = 0.1 # Elevation of free-surface in basic state
sim.soln.h[:,:,0] += -amp*np.tanh(sim.Y/Ljet)
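# The velocity below is the geostrophic balance with the tanh profile above:
# u = -(g/f0) * dh/dy = g*amp/(f0*Ljet) * sech^2(Y/Ljet)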
sim.soln.u[:,:,0] = sim.g*amp/(sim.f0*Ljet)/(np.cosh(sim.Y/Ljet)**2)
# Then we add on a random perturbation
sim.soln.u[:,:,0] += 0e-3*np.exp(-(sim.Y/Ljet)**2)*np.random.randn(sim.Nx,sim.Ny)
sim.run() # Run the simulation
| mit |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
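# For example, slice2gridspec((slice(0, 1, 5j), slice(0, 2, 11j))) returns
# (0, 2, 11, 0, 1, 5): x runs 0..2 in 11 steps and y runs 0..1 in 5 steps.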
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
| gpl-3.0 |
ForTozs/py3radar | setup.py | 1 | 3447 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 - 2013
# Matías Herranz <matiasherranz@gmail.com>
# Joaquín Tita <joaquintita@gmail.com>
#
# https://github.com/PyRadar/pyradar
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
# DOCS
#===============================================================================
"""This file is for distribute pyradar with setuptools
"""
#===============================================================================
# IMPORTS
#===============================================================================
import sys
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import pyradar
#===============================================================================
# CONSTANTS
#===============================================================================
PYPI_REQUIRE = [
"Pillow",
"numpy",
"matplotlib",
"scipy"
]
MANUAL_REQUIRE = {
"gdal" : "http://gdal.org/",
}
# suggested but not required
SUGESTED = {
}
#===============================================================================
# WARNINGS FOR MANUAL REQUIRES AND SUGGESTED
#===============================================================================
def validate_modules(requires):
not_found = []
for name, url in list(requires.items()):
try:
__import__(name)
except ImportError:
not_found.append("{} requires '{}' ({})".format(pyradar.PRJ,
name, url))
return not_found
def print_not_found(not_found, msg):
limits = "=" * max(list(map(len, not_found)))
    print("\n{}\n{}\n{}\n{}\n".format(msg, limits, "\n".join(not_found), limits))
not_found = validate_modules(MANUAL_REQUIRE)
if not_found:
print_not_found(not_found, "ERROR")
sys.exit(1)
not_found = validate_modules(SUGESTED)
if not_found:
print_not_found(not_found, "WARNING")
#===============================================================================
# FUNCTIONS
#===============================================================================
setup(
name=pyradar.PRJ.lower(),
version=pyradar.STR_VERSION,
description=pyradar.SHORT_DESCRIPTION,
author=pyradar.AUTHOR,
author_email=pyradar.EMAIL,
url=pyradar.URL,
license=pyradar.LICENSE,
keywords=pyradar.KEYWORDS,
classifiers=pyradar.CLASSIFIERS,
packages=[pkg for pkg in find_packages() if pkg.startswith("pyradar")],
include_package_data=True,
package_data={
'ExampleImages': ['pyradar/simulate/ExampleImages/*'],
'DemoSet' : ['pyradar/simulate/DemoSet/*'],
},
py_modules=["ez_setup"],
install_requires=PYPI_REQUIRE,
)
| lgpl-3.0 |