repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses, 15 values)
---|---|---|---|---|---
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_qt4agg.py | 10 | 2177 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import FigureCanvasQTAggBase as _FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt4agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(_FigureCanvasQTAggBase):
def __init__(self, figure):
self._agg_draw_pending = False
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasQTAggBase.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = []
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| gpl-3.0 |
nixphix/ml-projects | sentiment_analysis/twitter_sentiment_analysis-jallikattu/code/twitter_sentiment_analysis-jallikattu_FINAL.py | 1 | 11757 |
# coding: utf-8
# ### Sentiment Analysis on "Jallikattu" with Twitter Data Feed <h3 style="color:red;">#DataScienceForSocialCause</h3>
#
# Twitter is flooded with the Jallikattu issue; let us find people's sentiment with Data Science tools. The approach is as follows:
# * Register a Twitter API handle for data feed
# * Pull out tweets on search query 'jallikattu'
# * Using NLP packages find the sentiment of the tweet (Positive, Neutral or Negative)
# * Plot pie chart of the sentiment
# * Plot a masked word cloud of tags used
#
# The final output we expect is a masked word cloud of popular tags used on Twitter, with font size proportional to the frequency of use. Let's dive in ...
# ### Loading necessary packages
#
# In particular we will be using tweepy to register an api handle with twitter and get the data feed. [Tweepy Document](http://docs.tweepy.org/en/v3.5.0/)
# TextBlob package to determine the sentiment of the tweets. [TextBlob Document](https://textblob.readthedocs.io/en/dev/)
#
#
# In[1]:
# import tweepy for twitter datastream and textblob for processing tweets
import tweepy
import textblob
# wordcloud package is used to produce the cool masked tag cloud above
from wordcloud import WordCloud
# pickle to serialize/deserialize python objects
import pickle
# regex package to extract hashtags from tweets
import re
# os for loading files from the local system; matplotlib, np and PIL for plotting
from os import path
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# ### We will create a Twitter API handle for fetching data
#
# * In order to qualify for a Twitter API handle you need to be a **`Phone Verified`** Twitter user.
# 1. Go to the Twitter settings page [twitter.com/settings/account](https://twitter.com/settings/account)
# 2. Choose the Mobile tab on the left pane, then enter your phone number and verify by OTP
# 3. Now you should be able to register a new API handle for your account for `programmatic tweeting`
#
#
# * Now go to the Twitter [Application Management](https://apps.twitter.com/) page
# * Click the *Create New App* button
# 1. Enter a unique App name (global namespace); you might have to try a few times to get it right
# 2. Description can be anything you wish
# 3. Website can be some <yourname>.com; you don't really have to own the domain
# 4. Leave the callback URL empty, agree to the terms and conditions unconditionally
# 5. Click create
#
#
# * You can find the API credentials in the [Application Management](https://apps.twitter.com/) console
# * Choose the App and go to the *keys and access tokens* tab to get API_KEY, API_SECRET, ACCESS_TOKEN and ACCESS_TOKEN_SECRET
#
# #### RUN THE CODE BLOCK BELOW ONLY THE FIRST TIME YOU CONFIGURE THE TWITTER API
# In[ ]:
# make sure to exclude this folder in .gitignore
path_to_cred_file = path.abspath('../restricted/api_credentials.p')
# we will store twitter handle credentials in a pickle file (object serialization)
# the code for pickling credentials needs to be run only once, during initial configuration
# fill the following dictionary with your twitter credentials
twitter_credentials = {'api_key':'API_KEY', 'api_secret':'API_SECRET', 'access_token':'ACCESS_TOKEN', 'access_token_secret':'ACCESS_TOKEN_SECRET'}
pickle.dump(twitter_credentials,open(path_to_cred_file, "wb"))
print("Pickled credentials saved to :\n"+path_to_cred_file+"\n")
print("\n".join(["{:20} : {}".format(key,value) for key,value in twitter_credentials.items()]))
# #### From the second run onwards you can load the credentials securely from the stored file
# If you want to check the credentials, uncomment the last line in the code block below
# In[2]:
# make sure to exclude this folder in .gitignore
path_to_cred_file = path.abspath('../restricted/api_credentials.p')
# load saved twitter credentials
twitter_credentials = pickle.load(open(path_to_cred_file,'rb'))
#print("\n".join(["{:20} : {}".format(key,value) for key,value in twitter_credentials.items()]))
# ### Creating an Open Auth Instance
# With the created API key and token we will create an OAuth instance to authenticate our Twitter account.
#
# If you feel that your Twitter API credentials have been compromised, you can just generate a new access token-secret pair; the access token acts like a password that authenticates your API key.
# In[3]:
# lets create an open authentication handler and initialize it with our twitter handlers api key
auth = tweepy.OAuthHandler(twitter_credentials['api_key'],twitter_credentials['api_secret'])
# access token is like password for the api key,
auth.set_access_token(twitter_credentials['access_token'],twitter_credentials['access_token_secret'])
# ### Twitter API Handle
#
# Tweepy comes with a Twitter API wrapper class called 'API', passing the open auth instance to this API creates a live Twitter handle to our account.
#
#
# **ATTENTION: Please be aware that this is a handle to your own account, not any pseudo account; if you tweet something with this it will be your tweet.** This is the reason I took care not to expose my API credentials; if you expose them, anyone can mess up your Twitter account.
#
#
# Let's open the Twitter handle and print the Name and Location of the Twitter account owner; you should see your own name.
# In[4]:
# lets create an instance of twitter api wrapper
api = tweepy.API(auth)
# lets do some self check
user = api.me()
print("{}\n{}".format(user.name,user.location))
# ### Inspiration for this Project
# I drew inspiration for this project from the ongoing issue on traditional bull fighting, AKA *Jallikattu*. Here I'm trying to read the pulse of the people based on tweets.
#
# We are searching for the keyword *Jallikattu* in Twitter's public tweets; from the returned search results we take 150 tweets for our **Sentiment Analysis**. Please don't go for a large number of tweets; there is an upper limit of 450 tweets, and for more on API rate limits check out the [Twitter Developer Doc](https://dev.twitter.com/rest/public/rate-limits).
# In[5]:
# now lets get some data to check the sentiment on it
# lets search for key word jallikattu and check the sentiment on it
query = 'jallikattu'
tweet_cnt = 150
peta_tweets = api.search(q=query,count=tweet_cnt)
# ### Processing Tweets
#
# Once we get the tweets, we will iterate through them and do the following operations
# 1. Pass the tweet text to TextBlob to process the tweet
# 2. Processed tweets will have two attributes (a minimal TextBlob example is sketched just below)
# * Polarity, a numerical value between -1 and 1, from which the sentiment of the text can be inferred.
# * Subjectivity, which shows whether the text is stated as a fact or an opinion; the value ranges from 0 to 1.
# 3. For each tweet we will find the sentiment of the text (positive, neutral or negative) and update a counter variable accordingly; this counter is later plotted as a **pie chart**.
# 4. Then we pass the tweet text to a regular expression to extract hash tags, which we later use to create an awesome **word cloud visualization**.
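# A minimal TextBlob sketch (illustrative only, not part of the original notebook;
# the example sentence is made up) showing the two sentiment attributes used below:
sample_blob = textblob.TextBlob("The jallikattu tradition is great")
print("polarity     :", sample_blob.sentiment.polarity)      # > 0 here, driven by the positive word "great"
print("subjectivity :", sample_blob.sentiment.subjectivity)  # ranges from 0 (factual) to 1 (opinion)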
# In[6]:
# lets go over the tweets
sentiment_polarity = [0,0,0]
tags = []
for tweet in peta_tweets:
processed_tweet = textblob.TextBlob(tweet.text)
polarity = processed_tweet.sentiment.polarity
upd_index = 0 if polarity > 0 else (1 if polarity == 0 else 2)
sentiment_polarity[upd_index] = sentiment_polarity[upd_index]+1
tags.extend(re.findall(r"#(\w+)", tweet.text))
#print(tweet.text)
#print(processed_tweet.sentiment,'\n')
sentiment_label = ['Positive','Neutral','Negative']
#print("\n".join(["{:8} tweets count {}".format(s,val) for s,val in zip(sentiment_label,sentiment_polarity)]))
# plotting sentiment pie chart
colors = ['yellowgreen', 'gold', 'coral']
# lets explode the positive sentiment for visual appeal
explode = (0.1, 0, 0)
plt.pie(sentiment_polarity,labels=sentiment_label,colors=colors,explode=explode,shadow=True,autopct='%1.1f%%')
plt.axis('equal')
plt.legend(bbox_to_anchor=(1.3,1))
plt.title('Twitter Sentiment on \"'+query+'\"')
plt.show()
# ### Sentiment Analysis
#
# We can see that the majority is neutral, which is contributed by
# 1. Tweets with media only (photo, video)
# 2. Tweets in regional languages. TextBlob does not work on our Indian languages.
# 3. Some tweets that contain only stop words, or words that do not give any positive or negative perspective.
# 4. Polarity is calculated from the number of positive words like "great, awesome, etc." or negative words like "hate, bad, etc."
#
# One more point to note is that TextBlob is not a complete NLP package; it does not do context-aware analysis. Such sophisticated deep learning abilities are available only with the likes of Google.
# In[7]:
# lets process the hash tags in the tweets and make a word cloud visualization
# normalizing tags by converting all tags to lowercase
tags = [t.lower() for t in tags]
# get the unique tags so we can take a count for each
uniq_tags = list(set(tags))
tag_count = []
# for each unique hash tag take the frequency of occurrence
for tag in uniq_tags:
tag_count.append((tag,tags.count(tag)))
# lets print the top five tags
tag_count = sorted(tag_count, key=lambda x: -x[1])[:5]
print("\n".join(["{:8} {}".format(tag,val) for tag,val in tag_count]))
# ### Simple Word Cloud with Twitter #tags
#
# Let us visualize the tags used for Jallikattu by creating a tag cloud. The wordcloud package takes a single string of tags separated by whitespace. We will concatenate the tags and pass it to the generate method to create a tag cloud image.
# In[8]:
# we will create a vivid tag cloud visualization
# creating a single string of texts from tags, the tag's font size is proportional to its frequency
text = " ".join(tags)
# this generates an image from the long string, if you wish you may save it to local
wc = WordCloud().generate(text)
# we will display the image with matplotlibs image show, removed x and y axis ticks
plt.imshow(wc)
plt.axis("off")
plt.show()
# ### Masked Word Cloud
#
# The tag cloud can be masked using a grayscale stencil image; the wordcloud package neatly arranges the words inside the mask image. I have superimposed the generated word cloud image onto the mask image to provide some detailing, otherwise the background of the word cloud would be white and it would appear as if the words were hanging in space.
#
#
# In order to make the image superimposing work well, we need to manipulate image transparency using the image alpha channel. If you look at the visual, only fine detail of the mask image is seen in the tag cloud; this is because the word cloud is laid over the mask image with 90% opacity (alpha=0.9), so only about 10% of the mask image shows through.
# In[11]:
# we can also create a masked word cloud from the tags by using grayscale image as stencil
# lets load the mask image from local
bull_mask = np.array(Image.open(path.abspath('../asset/bull_mask_1.jpg')))
wc_mask = WordCloud(background_color="white", mask=bull_mask).generate(text)
mask_image = plt.imshow(bull_mask, cmap=plt.cm.gray)
word_cloud = plt.imshow(wc_mask,alpha=0.9)
plt.axis("off")
plt.title("Twitter Hash Tag Word Cloud for "+query)
plt.show()
# The tag cloud marks key moments like the call for protest at Chennai Marina and Alanganallur. It also shows a leading actor's support for the cause and calls for a ban on PETA.
#
# This code will give different output over time as new tweets are added to the timeline and old ones are pushed down.
# #### Thank you for showing interest in my work
# If you liked it and want to be notified of my future work follow me on
#
#
# [Knowme](https://knome.ultimatix.net/users/286632-prabakaran-k)
#
#
# [@iPrabakaran](https://twitter.com/iPrabakaran) Twitter
#
#
# [GitHub](https://github.com/nixphix)
| mit |
pdamodaran/yellowbrick | tests/checks.py | 1 | 4707 | # tests.checks
# Performs checking that visualizers adhere to Yellowbrick conventions.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon May 22 11:18:06 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: checks.py [4131cb1] benjamin@bengfort.com $
"""
Performs checking that visualizers adhere to Yellowbrick conventions.
"""
##########################################################################
## Imports
##########################################################################
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.base import ModelVisualizer, ScoreVisualizer
from yellowbrick.classifier.base import ClassificationScoreVisualizer
from yellowbrick.cluster.base import ClusteringScoreVisualizer
from yellowbrick.features.base import FeatureVisualizer, DataVisualizer
from yellowbrick.regressor.base import RegressionScoreVisualizer
from yellowbrick.text.base import TextVisualizer
##########################################################################
## Checking runnable
##########################################################################
def check_visualizer(Visualizer):
"""
Check if visualizer adheres to Yellowbrick conventions.
This function runs an extensive test-suite for input validation, return
values, exception handling, and more. Additional tests for scoring or
tuning visualizers will be run if the Visualizer class inherits from the
corresponding object.
"""
name = Visualizer.__name__
for check in _yield_all_checks(name, Visualizer):
check(name, Visualizer)
##########################################################################
## Generate the specific per-visualizer checking
##########################################################################
def _yield_all_checks(name, Visualizer):
"""
Composes the checks required for the specific visualizer.
"""
# Global Checks
yield check_instantiation
yield check_estimator_api
# Visualizer Type Checks
if issubclass(Visualizer, RegressionScoreVisualizer):
for check in _yield_regressor_checks(name, Visualizer):
yield check
if issubclass(Visualizer, ClassificationScoreVisualizer):
for check in _yield_classifier_checks(name, Visualizer):
yield check
if issubclass(Visualizer, ClusteringScoreVisualizer):
for check in _yield_clustering_checks(name, Visualizer):
yield check
if issubclass(Visualizer, FeatureVisualizer):
for check in _yield_feature_checks(name, Visualizer):
yield check
if issubclass(Visualizer, TextVisualizer):
for check in _yield_text_checks(name, Visualizer):
yield check
# Other checks
def _yield_regressor_checks(name, Visualizer):
"""
Checks for regressor visualizers
"""
pass
def _yield_classifier_checks(name, Visualizer):
"""
Checks for classifier visualizers
"""
pass
def _yield_clustering_checks(name, Visualizer):
"""
Checks for clustering visualizers
"""
pass
def _yield_feature_checks(name, Visualizer):
"""
Checks for feature visualizers
"""
pass
def _yield_text_checks(name, Visualizer):
"""
Checks for text visualizers
"""
pass
##########################################################################
## Checking Functions
##########################################################################
def check_instantiation(name, Visualizer, args=(), kwargs=None):
    # assert that visualizers can be passed an axes object.
    kwargs = kwargs or {}
    ax = plt.gca()
    viz = Visualizer(*args, **kwargs)
    assert viz.ax == ax
def check_estimator_api(name, Visualizer):
    X = np.random.rand(10, 5)
    y = np.random.randint(0, 2, 10)
# Ensure fit returns self.
viz = Visualizer()
self = viz.fit(X, y)
assert viz == self
if __name__ == '__main__':
import sys
sys.path.append("..")
from yellowbrick.classifier import *
from yellowbrick.cluster import *
from yellowbrick.features import *
from yellowbrick.regressor import *
from yellowbrick.text import *
visualizers = [
ClassBalance, ClassificationReport, ConfusionMatrix, ROCAUC,
KElbowVisualizer, SilhouetteVisualizer,
ScatterVisualizer, JointPlotVisualizer, Rank2D, RadViz, ParallelCoordinates,
AlphaSelection, ManualAlphaSelection,
PredictionError, ResidualsPlot,
TSNEVisualizer, FreqDistVisualizer, PosTagVisualizer
]
for visualizer in visualizers:
check_visualizer(visualizer)
| apache-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/tests/test_merge_column_and_index.py | 5 | 5592 | import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
# Fixtures
# ========
@pytest.fixture
def df_left():
# Create frame with 10 partitions
# Frame has 11 distinct idx values
partition_sizes = np.array([3, 4, 2, 5, 3, 2, 5, 9, 4, 7, 4])
idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]
k = [i for s in partition_sizes for i in range(s)]
vi = range(len(k))
return pd.DataFrame(dict(
idx=idx,
k=k,
v1=vi
)).set_index(['idx'])
@pytest.fixture
def df_right():
# Create frame with 10 partitions
# Frame has 11 distinct idx values
partition_sizes = np.array([4, 2, 5, 3, 2, 5, 9, 4, 7, 4, 8])
idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]
k = [i for s in partition_sizes for i in range(s)]
vi = range(len(k))
return pd.DataFrame(dict(
idx=idx,
k=k,
v1=vi
)).set_index(['idx'])
@pytest.fixture
def ddf_left(df_left):
# Create frame with 10 partitions
# Skip division on 2 so there is one mismatch with ddf_right
return dd.repartition(df_left, [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11])
@pytest.fixture
def ddf_left_unknown(ddf_left):
return ddf_left.clear_divisions()
@pytest.fixture
def ddf_left_single(df_left):
return dd.from_pandas(df_left, npartitions=1, sort=False)
@pytest.fixture
def ddf_right(df_right):
# Create frame with 10 partitions
# Skip division on 3 so there is one mismatch with ddf_left
return dd.repartition(df_right, [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11])
@pytest.fixture
def ddf_right_unknown(ddf_right):
return ddf_right.clear_divisions()
@pytest.fixture
def ddf_right_single(df_right):
return dd.from_pandas(df_right, npartitions=1, sort=False)
@pytest.fixture(params=['inner', 'left', 'right', 'outer'])
def how(request):
return request.param
@pytest.fixture(params=[
'idx',
['idx'],
['idx', 'k'],
['k', 'idx']])
def on(request):
return request.param
# Tests
# =====
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
def test_merge_known_to_known(df_left, df_right, ddf_left, ddf_right, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Perform merge
result = ddf_left.merge(ddf_right, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, tuple(range(12)))
assert len(result.__dask_graph__()) < 80
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
@pytest.mark.parametrize('how', ['inner', 'left'])
def test_merge_known_to_single(df_left, df_right, ddf_left, ddf_right_single, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Perform merge
result = ddf_left.merge(ddf_right_single, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, ddf_left.divisions)
assert len(result.__dask_graph__()) < 30
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
@pytest.mark.parametrize('how', ['inner', 'right'])
def test_merge_single_to_known(df_left, df_right, ddf_left_single, ddf_right, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Perform merge
result = ddf_left_single.merge(ddf_right, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, ddf_right.divisions)
assert len(result.__dask_graph__()) < 30
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
def test_merge_known_to_unknown(df_left, df_right, ddf_left, ddf_right_unknown, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Perform merge
result = ddf_left.merge(ddf_right_unknown, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, tuple(None for _ in range(11)))
assert len(result.__dask_graph__()) >= 400
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
def test_merge_unknown_to_known(df_left, df_right, ddf_left_unknown, ddf_right, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Perform merge
result = ddf_left_unknown.merge(ddf_right, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, tuple(None for _ in range(11)))
assert len(result.__dask_graph__()) > 400
@pytest.mark.skipif(PANDAS_VERSION < '0.23.0',
reason="Need pandas col+index merge support (pandas-dev/pandas#14355)")
def test_merge_unknown_to_unknown(df_left, df_right, ddf_left_unknown, ddf_right_unknown, on, how):
# Compute expected
expected = df_left.merge(df_right, on=on, how=how)
# Merge unknown to unknown
result = ddf_left_unknown.merge(ddf_right_unknown, on=on, how=how, shuffle='tasks')
# Assertions
assert_eq(result, expected)
assert_eq(result.divisions, tuple(None for _ in range(11)))
assert len(result.__dask_graph__()) > 400
| gpl-3.0 |
hakonsbm/nest-simulator | extras/ConnPlotter/examples/connplotter_tutorial.py | 18 | 27730 | # -*- coding: utf-8 -*-
#
# connplotter_tutorial.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# !========================
# ! ConnPlotter: A Tutorial
# !========================
# !
# ! :Author: Hans Ekkehard Plesser
# ! :Institution: Norwegian University of Life Sciences, Simula
# ! Research Laboratory, RIKEN Brain Sciences Institute
# ! :Version: 0.7
# ! :Date: 1 December 2009
# ! :Copyright: Hans Ekkehard Plesser
# ! :License: Creative Commons Attribution-Noncommercial-Share Alike License
# ! v 3.0
# !
# ! :Note: For best results, you should run this script with PyReport by
# ! Gael Varoquaux, available from
# ! http://gael-varoquaux.info/computers/pyreport/
# !
# ! Please set using_pyreport to True if you want to run the
# ! script through pyreport. Otherwise, figures will not be captured
# ! correctly.
using_pyreport = False
# ! Introduction
# !=============
# ! This tutorial gives a brief introduction to the ConnPlotter
# ! toolbox. It is by no means complete.
# ! Avoid interactive backend when using pyreport
if using_pyreport:
import matplotlib
matplotlib.use("Agg")
# ! Import pylab to call pylab.show() so that pyreport
# ! can capture figures created. Must come before import
# ! ConnPlotter so we get the correct show().
import pylab
# ! If not using pyreport, disable pylab.show() until we reach end of script
if not using_pyreport:
pylab_show = pylab.show
def nop(s=None):
pass
pylab.show = nop
# ! Import ConnPlotter and its examples
import ConnPlotter as cpl
import ConnPlotter.examples as ex
# ! Turn off warnings about resized figure windows
import warnings
warnings.simplefilter("ignore")
# ! Define a helper function to show LaTeX tables on the fly
def showTextTable(connPattern, fileTrunk):
"""
Shows a Table of Connectivity as textual table.
Arguments:
connPattern ConnectionPattern instance
fileTrunk Eventual PNG image will be fileTrunk.png
"""
import subprocess as subp # to call LaTeX etc
import os # to remove files
# Write to LaTeX file so we get a nice textual representation
# We want a complete LaTeX document, so we set ``standalone``
# to ``True``.
connPattern.toLaTeX(file=fileTrunk + '.tex', standalone=True,
enumerate=True)
# Create PDF, crop, and convert to PNG
try:
devnull = open('/dev/null', 'w')
subp.call(['pdflatex', fileTrunk], stdout=devnull, stderr=subp.STDOUT)
# need wrapper, since pdfcrop does not begin with #!
subp.call(['pdfcrop ' + fileTrunk + '.pdf ' + fileTrunk + '-crop.pdf'],
shell=True,
stdout=devnull, stderr=subp.STDOUT)
devnull.close()
os.rename(fileTrunk + '-crop.pdf', fileTrunk + '.pdf')
for suffix in ('.tex', '-crop.pdf', '.png', '.aux', '.log'):
if os.path.exists(fileTrunk + suffix):
os.remove(fileTrunk + suffix)
except:
raise Exception('Could not create PDF Table.')
# ! Simple network
# ! ==============
# ! This is a simple network with two layers A and B; layer B has two
# ! populations, E and I. On the NEST side, we use only synapse type
# ! ``static_synapse``. ConnPlotter then infers that synapses with positive
# ! weights should have type ``exc``, those with negative weight type ``inh``.
# ! Those two types are known to ConnPlotter.
# ! Obtain layer, connection and model list from the example set
s_layer, s_conn, s_model = ex.simple()
# ! Create Connection Pattern representation
s_cp = cpl.ConnectionPattern(s_layer, s_conn)
# ! Show pattern as textual table (we cheat a little and include PDF directly)
showTextTable(s_cp, 'simple_tt')
# $ \centerline{\includegraphics{simple_tt.pdf}}
# ! Show pattern in full detail
# ! ---------------------------
# ! A separate patch is shown for each pair of populations.
# !
# ! - Rows represent senders, columns targets.
# ! - Layer names are given to the left/above, population names to the right
# ! and below.
# ! - Excitatory synapses shown in blue, inhibitory in red.
# ! - Each patch has its own color scale.
s_cp.plot()
pylab.show()
# ! Let us take a look at what this connection pattern table shows:
# !
# ! - The left column, with header "A", is empty: The "A" layer receives
# ! no input.
# ! - The right column shows input to layer "B"
# !
# ! * The top row, labeled "A", has two patches in the "B" column:
# !
# ! + The left patch shows relatively focused input to the "E" population
# ! in layer "B" (first row of "Connectivity" table).
# ! + The right patch shows wider input to the "I" population in layer
# ! "B" (second row of "Connectivity" table).
# ! + Patches are red, indicating excitatory connections.
# ! + In both cases, mask are circular, and the product of connection
# ! weight and probability is independent of the distance between sender
# ! and target neuron.
# !
# ! * The grey rectangle to the bottom right shows all connections from
# ! layer "B" populations to layer "B" populations. It is subdivided into
# ! two rows and two columns:
# !
# ! + Left column: inputs to the "E" population.
# ! + Right column: inputs to the "I" population.
# ! + Top row: projections from the "E" population.
# ! + Bottom row: projections from the "I" population.
# ! + There is only one type of synapse for each sender-target pair,
# ! so there is only a single patch per pair.
# ! + Patches in the top row, from population "E" show excitatory
# ! connections, thus they are red.
# ! + Patches in the bottom row, from population "I" show inhibitory
# ! connections, thus they are blue.
# ! + The patches in detail are:
# !
# ! - **E to E** (top-left, row 3+4 in table): two rectangular
# ! projections at 90 degrees.
# ! - **E to I** (top-right, row 5 in table): narrow gaussian projection.
# ! - **I to E** (bottom-left, row 6 in table): wider gaussian projection
# ! - **I to I** (bottom-right, row 7 in table): circular projection
# ! covering entire layer.
# !
# ! - **NB:** Color scales are different, so one **cannot** compare connection
# ! strengths between patches.
# ! Full detail, common color scale
# ! -------------------------------
s_cp.plot(globalColors=True)
pylab.show()
# ! This figure shows the same data as the one above, but now all patches use
# ! a common color scale, so full intensity color (either red or blue)
# ! indicates the strongest connectivity. From this we see that
# !
# ! - A to B/E is stronger than A to B/I
# ! - B/E to B/I is the strongest of all connections at the center
# ! - B/I to B/E is stronger than B/I to B/I
# ! Aggregate by groups
# ! -------------------
# ! For each pair of population groups, sum connections of the same type
# ! across populations.
s_cp.plot(aggrGroups=True)
pylab.show()
# ! In the figure above, all excitatory connections from B to B layer have been
# ! combined into one patch, as have all inhibitory connections from B to B.
# ! In the upper-right corner, all connections from layer A to layer B have
# ! been combined; the patch for inhibitory connections is missing, as there
# ! are none.
# ! Aggregate by groups and synapse models
# ! --------------------------------------
s_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! When aggregating across synapse models, excitatory and inhibitory
# ! connections are combined. By default, excitatory connections are weighted
# ! with +1, inhibitory connections with -1 in the sum. This may yield kernels
# ! with positive and negative values. They are shown on a red-white-blue scale
# ! as follows:
# !
# ! - White always represents 0.
# ! - Positive values are represented by increasingly saturated red.
# ! - Negative values are represented by increasingly saturated blue.
# ! - Colorscales are separate for red and blue:
# !
# ! * largest positive value: fully saturated red
# ! * largest negative value: fully saturated blue
# !
# ! - Each patch has its own colorscales.
# ! - When ``aggrSyns=True`` is combined with ``globalColors=True``,
# ! all patches use the same minimum and maximum in their red and blue
# ! color scales. The minimum is the negative of the maximum, so that
# ! blue and red intensities can be compared.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True)
pylab.show()
# ! - We can explicitly set the limits of the color scale; if values exceeding
# ! the limits are present, this is indicated by an arrowhead at the end of
# ! the colorbar. User-defined color limits need not be symmetric about 0.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True,
colorLimits=[-2, 3])
pylab.show()
# ! Save pattern to file
# ! --------------------
# s_cp.plot(file='simple_example.png')
# ! This saves the detailed diagram to the given file. If you want to save
# ! the pattern in several file formats, you can pass a tuple of file names,
# ! e.g., ``s_cp.plot(file=('a.eps', 'a.png'))``.
# !
# ! **NB:** Saving directly to PDF may lead to files with artifacts. We
# ! recommend saving to EPS and then converting to PDF.
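# ! (For the conversion one can use, e.g., TeX Live's ``epstopdf`` tool:
# ! ``epstopdf simple_example.eps``. This assumes such a converter is installed;
# ! it is only an illustrative suggestion, not part of ConnPlotter itself.)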
# ! Build network in NEST
# ! ---------------------
import nest
import nest.topology as topo
# ! Create models
for model in s_model:
nest.CopyModel(model[0], model[1], model[2])
# ! Create layers, store layer info in Python variable
for layer in s_layer:
exec ('%s = topo.CreateLayer(layer[1])' % layer[0])
# ! Create connections, need to insert variable names
for conn in s_conn:
eval('topo.ConnectLayers(%s,%s,conn[2])' % (conn[0], conn[1]))
nest.Simulate(10)
# ! **Oops:** Nothing happened? Well, it did, but pyreport cannot capture the
# ! output directly generated by NEST. The absence of an error message in this
# ! place shows that network construction and simulation went through.
# ! Inspecting the connections actually created
# ! :::::::::::::::::::::::::::::::::::::::::::
# ! The following block of messy and makeshift code plots the targets of the
# ! center neuron of the B/E population in the B/E and the B/I populations.
B_top = nest.GetStatus(RG, 'topology')[0]
ctr_id = topo.GetElement(RG,
[int(B_top['rows'] / 2), int(B_top['columns'] / 2)])
# find excitatory element in B
E_id = [gid for gid in ctr_id
if nest.GetStatus([gid], 'model')[0] == 'E']
# get all targets, split into excitatory and inhibitory
alltgts = nest.GetStatus(
nest.GetConnections(E_id, synapse_model='static_synapse'), 'target')
Etgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'E']
Itgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'I']
# obtain positions of targets
Etpos = tuple(zip(*topo.GetPosition(Etgts)))
Itpos = tuple(zip(*topo.GetPosition(Itgts)))
# plot excitatory
pylab.clf()
pylab.subplot(121)
pylab.scatter(Etpos[0], Etpos[1])
ctrpos = pylab.array(topo.GetPosition(E_id)[0])
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.4, -0.2)), 0.8, 0.4, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.2, -0.4)), 0.4, 0.8, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.5, -0.5)), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
# plot inhibitory
pylab.subplot(122)
pylab.scatter(Itpos[0], Itpos[1])
ctrpos = topo.GetPosition(E_id)[0]
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.2, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.3, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.5, zorder=2,
fc='none', ec='r', lw=3))
ax.add_patch(pylab.Rectangle((-0.5, -0.5), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
pylab.show()
# ! Thick red lines mark the mask, dashed red lines to the right one, two and
# ! three standard deviations. The sender location is marked by the red spot
# ! in the center. Layers are 40x40 in size.
# ! A more complex network
# ! ======================
# !
# ! This network has layers A and B, with E and I populations in B. The added
# ! complexity comes from the fact that we now have four synapse types: AMPA,
# ! NMDA, GABA_A and GABA_B. These synapse types are known to ConnPlotter.
# ! Setup and tabular display
c_layer, c_conn, c_model = ex.complex()
c_cp = cpl.ConnectionPattern(c_layer, c_conn)
showTextTable(c_cp, 'complex_tt')
# $ \centerline{\includegraphics{complex_tt.pdf}}
# ! Pattern in full detail
# ! ----------------------
c_cp.plot()
pylab.show()
# ! Note the following differences to the simple pattern case:
# !
# ! - For each pair of populations, e.g., B/E as sender and B/E as target,
# ! we now have two patches representing AMPA and NMDA synapse for the E
# ! population, GABA_A and _B for the I population.
# ! - Colors are as follows:
# !
# ! :AMPA: red
# ! :NMDA: orange
# ! :GABA_A: blue
# ! :GABA_B: purple
# ! - Note that the horizontal rectangular pattern (table line 3) describes
# ! AMPA synapses, while the vertical rectangular pattern (table line 4)
# ! describes NMDA synapses.
# ! Full detail, common color scale
# ! -------------------------------
c_cp.plot(globalColors=True)
pylab.show()
# ! As above, but now with a common color scale.
# ! **NB:** The patch for the B/I to B/I connection may look empty, but it
# ! actually shows a very light shade of red. Rules are as follows:
# !
# ! - If there is no connection between two populations, show the grey layer
# ! background.
# ! - All parts of the target layer that are outside the mask or strictly zero
# ! are off-white.
# ! - If it looks bright white, it is a very diluted shade of the color for the
# ! pertaining synapse type.
# ! Full detail, explicit color limits
# ! ----------------------------------
c_cp.plot(colorLimits=[0, 1])
pylab.show()
# ! As above, but the common color scale is now given explicitly.
# ! The arrow at the right end of the color scale indicates that the values
# ! in the kernels extend beyond +1.
# ! Aggregate by synapse models
# ! -----------------------------
# ! For each population pair, connections are summed across
# ! synapse models.
# !
# ! - Excitatory kernels are weighted with +1, inhibitory kernels with -1.
# ! - The resulting kernels are shown on a color scale ranging from red
# ! (inhibitory) via white (zero) to blue (excitatory).
# ! - Each patch has its own color scale
c_cp.plot(aggrSyns=True)
pylab.show()
# !
# ! - AMPA and NMDA connections from B/E to B/E are now combined to form a
# ! cross.
# ! - GABA_A and GABA_B connections from B/I to B/E are two concentric spots.
# ! Aggregate by population group
# ! ------------------------------
c_cp.plot(aggrGroups=True)
pylab.show()
# ! This is in many ways orthogonal to aggregation by synapse model:
# ! We keep synapse types separate, while we combine across populations. Thus,
# ! we have added the horizontal bar (B/E to B/E, row 3) with the spot
# ! (B/E to B/I, row 5).
# ! Aggregate by population group and synapse model
# ! -----------------------------------------------------------------
c_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! All connection are combined for each pair of sender/target layer.
# ! CPTs using the total charge deposited (TCD) as intensity
# ! -----------------------------------------------------------
# ! TCD-based CPTs are currently only available for the ht_neuron, since
# ! ConnPlotter does not know how to obtain \int g(t) dt from NEST for other
# ! conductance-based model neurons.
# ! We need to create a separate ConnectionPattern instance for each membrane
# ! potential we want to use in the TCD computation
c_cp_75 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-75.0)
c_cp_45 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-45.0)
# ! In order to obtain a meaningful comparison between both membrane
# ! potentials, we use the same global color scale.
# ! V_m = -75 mV
# ! ::::::::::::::
c_cp_75.plot(colorLimits=[0, 150])
pylab.show()
# ! V_m = -45 mV
# ! ::::::::::::::
c_cp_45.plot(colorLimits=[0, 150])
pylab.show()
# ! Note that the NMDA projection virtually vanishes for V_m=-75mV, but is very
# ! strong for V_m=-45mV. GABA_A and GABA_B projections are also stronger,
# ! while AMPA is weaker for V_m=-45mV.
# ! Non-Dale network model
# ! ======================
# ! By default, ConnPlotter assumes that networks follow Dale's law, i.e.,
# ! either make excitatory or inhibitory connections. If this assumption
# ! is violated, we need to inform ConnPlotter how synapse types are grouped.
# ! We look at a simple example here.
# ! Load model
nd_layer, nd_conn, nd_model = ex.non_dale()
# ! We specify the synapse configuration using the synTypes argument:
# !
# ! - synTypes is a tuple.
# ! - Each element in the tuple represents a group of synapse models
# ! - Any sender can make connections with synapses from **one group only**.
# ! - Each synapse model is specified by a ``SynType``.
# ! - The SynType constructor takes three arguments:
# !
# ! * The synapse model name
# ! * The weight to apply when aggregating across synapse models
# ! * The color to use for the synapse type
# !
# ! - Synapse names must be unique, and must form a superset of all synapse
# ! models in the network.
nd_cp = cpl.ConnectionPattern(nd_layer, nd_conn, synTypes=(
(cpl.SynType('exc', 1.0, 'b'), cpl.SynType('inh', -1.0, 'r')),))
showTextTable(nd_cp, 'non_dale_tt')
# $ \centerline{\includegraphics{non_dale_tt.pdf}}
nd_cp.plot()
pylab.show()
# ! Note that we now have red and blue patches side by side, as the same
# ! population can make excitatory and inhibitory connections.
# ! Configuring the ConnectionPattern display
# ! =========================================
# ! I will now show you a few ways in which you can configure how ConnPlotter
# ! shows connection patterns.
# ! User defined synapse types
# ! --------------------------
# !
# ! By default, ConnPlotter knows the following two sets of synapse types.
# !
# ! exc/inh
# ! - Used automatically when all connections have the same synapse_model.
# ! - Connections with positive weight are assigned model exc, those with
# ! negative weight model inh.
# ! - When computing totals, exc has weight +1, inh weight -1
# ! - Exc is colored blue, inh red.
# !
# ! AMPA/NMDA/GABA_A/GABA_B
# ! - Used if the set of ``synapse_model`` s in the network is a subset of
# ! those four types.
# ! - AMPA/NMDA carry weight +1, GABA_A/GABA_B weight -1.
# ! - Colors are as follows:
# !
# ! :AMPA: blue
# ! :NMDA: green
# ! :GABA_A: red
# ! :GABA_B: magenta
# !
# !
# ! We saw a first example of user-defined synapse types in the non-Dale
# ! example above. In that case, we only changed the grouping. Here, I will
# ! demonstrate the effect of different ordering, weighting, and color
# ! specifications. We use the complex model from above as example.
# !
# ! *NOTE*: It is most likely a *bad idea* to change the colors or placement of
# ! synapse types. If everyone uses the same design rules, we will all be able
# ! to read each other's figures much more easily.
# ! Placement of synapse types
# ! ::::::::::::::::::::::::::
# !
# ! The ``synTypes`` nested tuple defines the placement of patches for
# ! different synapse models. Default layout is
# !
# ! ====== ======
# ! AMPA NMDA
# ! GABA_A GABA_B
# ! ====== ======
# !
# ! All four matrix elements are shown in this layout only when using
# ! ``mode='layer'`` display. Otherwise, one or the other row is shown.
# ! Note that synapses that can arise from a layer simultaneously, must
# ! always be placed on one matrix row, i.e., in one group. As an example,
# ! we now invert placement, without any other changes:
cinv_syns = ((cpl.SynType('GABA_B', -1, 'm'), cpl.SynType('GABA_A', -1, 'r')),
(cpl.SynType('NMDA', 1, 'g'), cpl.SynType('AMPA', 1, 'b')))
cinv_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cinv_syns)
cinv_cp.plot()
pylab.show()
# ! Notice that on each row the synapses are exchanged compared to the original
# ! figure above. When displaying by layer, also the rows have traded places:
cinv_cp.plot(aggrGroups=True)
pylab.show()
# ! Totals are not affected:
cinv_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Weighting of synapse types in ``totals`` mode
# ! :::::::::::::::::::::::::::::::::::::::::::::
# !
# ! Different synapses may have quite different efficacies, so weighting them
# ! all with +-1 when computing totals may give a wrong impression. Different
# ! weights can be supplied as second argument to SynTypes(). We return to the
# ! normal placement of synapses and
# ! create two examples with very different weights:
cw1_syns = ((cpl.SynType('AMPA', 10, 'b'), cpl.SynType('NMDA', 1, 'g')),
(cpl.SynType('GABA_A', -2, 'g'), cpl.SynType('GABA_B', -10, 'b')))
cw1_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw1_syns)
cw2_syns = ((cpl.SynType('AMPA', 1, 'b'), cpl.SynType('NMDA', 10, 'g')),
(cpl.SynType('GABA_A', -20, 'g'), cpl.SynType('GABA_B', -1, 'b')))
cw2_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw2_syns)
# ! We first plot them both in population mode
cw1_cp.plot(aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrSyns=True)
pylab.show()
# ! Finally, we plot them aggregating across groups and synapse models
cw1_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Alternative colors for synapse patches
# ! ::::::::::::::::::::::::::::::::::::::
# ! Different colors can be specified using any legal color specification.
# ! Colors should be saturated, as they will be mixed with white. You may
# ! also provide a colormap explicitly. For this example, we use once more
# ! normal placement and weights. As all synapse types are shown in layer
# ! mode, we use that mode for display here.
cc_syns = (
(cpl.SynType('AMPA', 1, 'maroon'), cpl.SynType('NMDA', 1, (0.9, 0.5, 0))),
(cpl.SynType('GABA_A', -1, '0.7'), cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
cc_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cc_syns)
cc_cp.plot(aggrGroups=True)
pylab.show()
# ! We get the following colors:
# !
# ! AMPA brownish
# ! NMDA golden orange
# ! GABA_A grey
# ! GABA_B hsv colormap
# !
# ! **NB:** When passing an explicit colormap, parts outside the mask will be
# ! shown in the "bad" color of the colormap, usually the "bottom" color in the
# ! map. To let points outside the mask appear in white, set the bad color of
# ! the colormap; unfortunately, this modifies the colormap.
pylab.cm.hsv.set_bad(cpl.colormaps.bad_color)
ccb_syns = (
(cpl.SynType('AMPA', 1, 'maroon'),
cpl.SynType('NMDA', 1, (0.9, 0.5, 0.1))),
(cpl.SynType('GABA_A', -1, '0.7'),
cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
ccb_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=ccb_syns)
ccb_cp.plot(aggrGroups=True)
pylab.show()
# ! Other configuration options
# ! ---------------------------
# !
# ! Some more adjustments are possible by setting certain module properties.
# ! Some of these need to be set before ConnectionPattern() is constructed.
# !
# ! Background color for masked parts of each patch
cpl.colormaps.bad_color = 'cyan'
# ! Background for layers
cpl.plotParams.layer_bg = (0.8, 0.8, 0.0)
# ! Resolution for patch computation
cpl.plotParams.n_kern = 5
# ! Physical size of patches: longest edge of largest patch, in mm
cpl.plotParams.patch_size = 40
# ! Margins around the figure (excluding labels)
cpl.plotParams.margins.left = 40
cpl.plotParams.margins.top = 30
cpl.plotParams.margins.bottom = 15
cpl.plotParams.margins.right = 30
# ! Fonts for layer and population labels
import matplotlib.font_manager as fmgr
cpl.plotParams.layer_font = fmgr.FontProperties(family='serif', weight='bold',
size='xx-large')
cpl.plotParams.pop_font = fmgr.FontProperties('small')
# ! Orientation for layer and population label
cpl.plotParams.layer_orientation = {'sender': 'vertical', 'target': 60}
cpl.plotParams.pop_orientation = {'sender': 'horizontal', 'target': -45}
# ! Font for legend titles and ticks, tick placement, and tick format
cpl.plotParams.legend_title_font = fmgr.FontProperties(family='serif',
weight='bold',
size='large')
cpl.plotParams.legend_tick_font = fmgr.FontProperties(family='sans-serif',
weight='light',
size='xx-small')
cpl.plotParams.legend_ticks = [0, 1, 2]
cpl.plotParams.legend_tick_format = '%.1f pA'
cx_cp = cpl.ConnectionPattern(c_layer, c_conn)
cx_cp.plot(colorLimits=[0, 2])
pylab.show()
# ! Several more options are available to control the format of the color bars
# ! (they are all members of plotParams; an illustrative sketch follows this list):
# ! * legend_location : if 'top', place synapse name atop color bar
# ! * margins.colbar : height of lower margin set aside for color bar, in mm
# ! * cbheight : height of single color bar relative to margins.colbar
# ! * cbwidth : width of single color bar relative to figure width
# ! * cbspace : spacing between color bars, relative to figure width
# ! * cboffset : offset of first color bar from left margin, relative to
# ! figure width
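# ! For example, one might adjust a few of these as shown below. The values are
# ! illustrative guesses only (not recommendations from this tutorial) and are
# ! left commented out so they do not alter the figures that follow.
# cpl.plotParams.legend_location = 'top'   # synapse name above each color bar
# cpl.plotParams.cbheight = 0.8            # color bar height, relative to margins.colbar
# cpl.plotParams.cbspace = 0.02            # spacing between color bars, relative to figure width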
# ! You can also specify the width of the final figure, but this may not work
# ! well with on-screen display or here in pyreport. Width is in mm.
# ! Note that left and right margin combined are 70mm wide, so only 50mm are
# ! left for the actual CPT.
cx_cp.plot(fixedWidth=120)
pylab.show()
# ! If not using pyreport, we finally show and block
if not using_pyreport:
print("")
print("The connplotter_tutorial script is done. " +
"Call pylab.show() and enjoy the figures!")
print(
"You may need to close all figures manually " +
"to get the Python prompt back.")
print("")
pylab.show = pylab_show
| gpl-2.0 |
comprna/SUPPA | scripts/generate_boxplot_event.py | 1 | 5584 | # The next script will format a phenotype table (junctions, events, trasncripts...)
# for runnning FastQTL analysis
#This version is for formatting the SCLC phenotype
"""
@authors: Juan L. Trincado
@email: juanluis.trincado@upf.edu
generate_boxplot_event.py: Generates a boxplot with the PSI values, given which samples are in which conditions
"""
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import re
from argparse import ArgumentParser, RawTextHelpFormatter
description = \
    "Description:\n\n" + \
    "This script accepts a phenotype table (junctions, events, transcripts...)\n" + \
    "and an event ID, and generates a boxplot of the PSI values of that event\n" + \
    "across the sample groups (conditions) supplied on the command line"
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter,
add_help=True)
parser.add_argument("-i", "--input", required=True,
help="Input file")
parser.add_argument("-e", "--event", required=True, type=str,
help="Event to plot")
parser.add_argument('-g', '--groups',
action="store",
required=True,
type=str,
nargs="*",
help="Ranges of column numbers specifying the replicates per condition. "
"Column numbers have to be continuous, with no overlapping or missing columns between them. "
"Ex: 1-3,4-6")
parser.add_argument('-c', '--conds',
action="store",
required=False,
default="0",
type=str,
nargs="*",
help="Name of each one of the conditions. Ex: Mutated,Non_mutated")
parser.add_argument("-o", "--output", required=True,
help="Output path")
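# A hypothetical invocation (the PSI file, event ID and output path are placeholders
# echoing the commented example in main(), not actual files shipped with the script):
#
#   python generate_boxplot_event.py -i events.psi \
#       -e "ENSG00000149554;SE:chr11:125496728-125497502:125497725-125499127:+" \
#       -g 1-3,4-6 -c Mutated,Non_mutated -o ./plots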
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def main():
args = parser.parse_args()
input_file = args.input
event = args.event
groups = re.findall(r"[\w]+", args.groups[0])
output_path = args.output
# input_file = "/home/juanluis/Desktop/Work/Master_class/events.psi"
# event = "ENSG00000149554;SE:chr11:125496728-125497502:125497725-125499127:+"
# groups = ['1','3','4','6']
# output_path = "/home/juanluis/Desktop/Work/Master_class/"
try:
logger.info("Reading input file...")
dict_PSI = {}
cond = 1
success = False
file = open(input_file)
for line in file:
tokens = line.rstrip().split("\t")
if (tokens[0]==event):
success = True
for i,x in enumerate(groups):
if(i%2==1):
continue
PSI = []
samples = range(int(groups[i]),int(groups[i+1])+1)
#Get the PSI of this group of samples
for j in samples:
PSI.append(tokens[j])
dict_PSI[cond] = PSI
cond = cond + 1
break
if(success):
#Create the boxplot
data_to_plot = []
for key in dict_PSI.keys():
data_to_plot.append(list(map(float,dict_PSI[key])))
# Create a figure instance
fig = plt.figure(figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot, patch_artist=True, sym='')
# change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='.', color='#000000', alpha=0.7)
# Assign different colors
colors = ['lightblue', 'pink']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
for j in range(len(data_to_plot)):
y = data_to_plot[j]
x = np.random.normal(1 + j, 0.02, size=len(y))
plt.plot(x, y, 'ko', alpha=0.5)
# Custom x-axis labels if the user has input conditions
if (args.conds != "0"):
conditions = re.findall(r"[\w]+", args.conds[0])
ax.set_xticklabels(conditions)
# Leave just ticks in the bottom
ax.get_xaxis().tick_bottom()
ax.set_ylabel('PSI')
# Set the title
title = "Event: " + event
ax.set_title(title, fontsize=10)
# Add a horizontal grid to the plot,
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Set the limits for the y axes
ax.set_ylim([-0.05, 1.05])
# Save the figure
output_path = output_path + "/" + event + ".png"
logger.info("Created " + output_path)
fig.savefig(output_path, bbox_inches='tight')
else:
logger.info("Event not found.")
logger.info("Done.")
exit(0)
except Exception as error:
logger.error(repr(error))
logger.error("Aborting execution")
sys.exit(1)
if __name__ == '__main__':
main() | mit |
OshynSong/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
kazemakase/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
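# The demo below is an editorial addition, not part of the scikit-learn module:
# a minimal sketch of the "pair" and "string" input types described in the
# class docstring above. The feature names and n_features value are made up
# for illustration only.
if __name__ == "__main__":
    # "pair" input: each sample is a sequence of (feature_name, value) pairs.
    hasher_pairs = FeatureHasher(n_features=8, input_type="pair")
    X_pairs = hasher_pairs.transform([[("dog", 1), ("cat", 2)], [("run", 5)]])
    print(X_pairs.toarray())
    # "string" input: each sample is a sequence of feature names; every name
    # gets an implied value of 1, so repeated names accumulate.
    hasher_strings = FeatureHasher(n_features=8, input_type="string")
    X_strings = hasher_strings.transform([["dog", "cat", "cat"], ["run"]])
    print(X_strings.toarray())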
| bsd-3-clause |
aflaxman/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 33 | 3875 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for Bayesian Ridge Regression
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot true weights, estimated weights, histogram of the weights, and
# predictions with standard deviations
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True,
edgecolor='black')
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=0.1)
clf_poly = BayesianRidge()
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial Bayesian Ridge Regression", linewidth=lw)
plt.plot(X_plot, y_plot, color='gold', linewidth=lw,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| bsd-3-clause |
ueshin/apache-spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
# Here expects skipped test output from unittest when verbosity level is
# 2 (or --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
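# Example invocations of this script (illustrative only; the module and test
# names below are examples and may not all exist in a given Spark checkout):
#   python/run-tests.py --python-executables=python3 --modules=pyspark-sql
#   python/run-tests.py --testnames 'pyspark.sql.tests FooTests.test_foo' -p 2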
def _check_coverage(python_exec):
# Make sure if coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
| apache-2.0 |
nwjs/chromium.src | tools/perf/experimental/representative_perf_test_limit_adjuster/adjust_upper_limits.py | 1 | 6803 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import os
import sys
import shutil
import subprocess
import tempfile
CHROMIUM_PATH = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
TOOLS_PERF_PATH = os.path.join(CHROMIUM_PATH, 'tools', 'perf')
sys.path.insert(1, TOOLS_PERF_PATH)
from core.external_modules import pandas
RUNS_USED_FOR_LIMIT_UPDATE = 30
CHANGE_PERCENTAGE_LIMIT = 0.01
SWARMING_PATH = os.path.join(
CHROMIUM_PATH, 'tools', 'swarming_client', 'swarming.py')
UPPER_LIMITS_DATA_DIR = os.path.join(
CHROMIUM_PATH, 'testing', 'scripts', 'representative_perf_test_data')
def FetchItemIds(tags, limit):
"""Fetches the item id of tasks described by the tags.
Args:
tags: The tags which describe the task such as OS and buildername.
limit: The number of runs to look at.
Returns:
A list containing the item Id of the tasks.
"""
swarming_attributes = (
'tasks/list?tags=name:rendering_representative_perf_tests&tags=os:{os}'
'&tags=buildername:{buildername}&tags=master:chromium.gpu.fyi&state='
'COMPLETED&fields=cursor,items(task_id)').format(**tags)
query = [
SWARMING_PATH, 'query', '-S', 'chromium-swarm.appspot.com', '--limit',
str(limit), swarming_attributes]
output = json.loads(subprocess.check_output(query))
return output.get('items')
def FetchItemData(task_id, benchmark, index, temp_dir):
"""Fetches the performance values (AVG & CI ranges) of tasks.
Args:
    task_id: The id of the task to fetch data for.
    benchmark: The benchmark this task is on (desktop/mobile).
index: The index field of the data_frame
temp_dir: The temp directory to store task data in.
Returns:
A data_frame containing the averages and confidence interval ranges.
"""
output_directory = os.path.abspath(
os.path.join(temp_dir, task_id))
query = [
SWARMING_PATH, 'collect', '-S', 'chromium-swarm.appspot.com',
'--task-output-dir', output_directory, task_id]
try:
subprocess.check_output(query)
except Exception as e:
print(e)
result_file_path = os.path.join(
output_directory, '0', 'rendering.' + benchmark, 'perf_results.csv')
try:
df = pandas.read_csv(result_file_path)
df = df.loc[df['name'] == 'frame_times']
df = df[['stories', 'avg', 'ci_095']]
df['index'] = index
return df
except:
print("CSV results were not produced!")
def GetPercentileValues(benchmark, tags, limit, percentile):
"""Get the percentile value of recent runs described by given tags.
Given the tags, benchmark this function fetches the data of last {limit}
runs, and find the percentile value for each story.
Args:
    benchmark: The benchmark these tasks are on (desktop/mobile).
tags: The tags which describe the tasks such as OS and buildername.
limit: The number of runs to look at.
percentile: the percentile to return.
Returns:
A dictionary with averages and confidence interval ranges calculated
from the percentile of recent runs.
"""
items = []
for tag_set in tags:
items.extend(FetchItemIds(tag_set, limit))
dfs = []
try:
temp_dir = tempfile.mkdtemp('perf_csvs')
for idx, item in enumerate(items):
dfs.append(FetchItemData(item['task_id'], benchmark, idx, temp_dir))
finally:
shutil.rmtree(temp_dir)
data_frame = pandas.concat(dfs, ignore_index=True)
if not data_frame.empty:
avg_df = data_frame.pivot(index='stories', columns='index', values='avg')
upper_limit = avg_df.quantile(percentile, axis = 1)
ci_df = data_frame.pivot(index='stories', columns='index', values='ci_095')
upper_limit_ci = ci_df.quantile(percentile, axis = 1)
results = {}
for index in avg_df.index:
results[index] = {
'avg': round(upper_limit[index], 3),
'ci_095': round(upper_limit_ci[index], 3)
}
return results
def MeasureNewUpperLimit(old_value, new_value, att_name, max_change):
# There has been an improvement.
if new_value < old_value:
# Decrease the limit gradually in case of improvements.
new_value = (old_value + new_value) / 2.0
change_pct = 0.0
if old_value > 0:
change_pct = (new_value - old_value) / old_value
print(
' {}:\t\t {} -> {} \t({:.2f}%)'.format(
att_name, old_value, new_value, change_pct * 100))
if new_value < 0.01:
print('WARNING: New selected value is close to 0.')
return (
round(new_value, 3),
max(max_change, abs(change_pct))
)
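# Worked example for MeasureNewUpperLimit (illustrative numbers only): with
# old_value=10.0 and new_value=8.0 the new value is an improvement, so the
# limit is only lowered halfway to 9.0 and the reported change is -10%; with
# old_value=10.0 and new_value=12.0 the limit moves straight to 12.0 (+20%).
# In both cases max_change is raised to the absolute change if it is larger.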
def RecalculateUpperLimits(data_point_count):
"""Recalculates the upper limits using the data of recent runs.
This method replaces the existing JSON file which contains the upper limits
used by representative perf tests if the changes of upper limits are
significant.
Args:
data_point_count: The number of runs to use for recalculation.
"""
with open(os.path.join(UPPER_LIMITS_DATA_DIR,
'platform_specific_tags.json')) as tags_data:
platform_specific_tags = json.load(tags_data)
with open(
os.path.join(
UPPER_LIMITS_DATA_DIR,
'representatives_frame_times_upper_limit.json')) as current_data:
current_upper_limits = json.load(current_data)
max_change = 0.0
results = {}
for platform in platform_specific_tags:
platform_data = platform_specific_tags[platform]
print('\n- Processing data ({})'.format(platform))
results[platform] = GetPercentileValues(
platform_data['benchmark'], platform_data['tags'],
data_point_count, 0.95)
# Loop over results and adjust base on current values.
for story in results[platform]:
if story in current_upper_limits[platform]:
print(story, ':')
new_avg, max_change = MeasureNewUpperLimit(
current_upper_limits[platform][story]['avg'],
results[platform][story]['avg'], 'AVG', max_change)
results[platform][story]['avg'] = new_avg
new_ci, max_change = MeasureNewUpperLimit(
current_upper_limits[platform][story]['ci_095'],
results[platform][story]['ci_095'], 'CI', max_change)
results[platform][story]['ci_095'] = new_ci
if max_change > CHANGE_PERCENTAGE_LIMIT:
with open(
os.path.join(
UPPER_LIMITS_DATA_DIR,
'representatives_frame_times_upper_limit.json'
), 'w') as outfile:
json.dump(results, outfile, separators=(',', ': '), indent=2)
print(
'Upper limits were updated on '
'representatives_frame_times_upper_limit.json')
else:
print('Changes are small, no need for new limits')
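# For reference, RecalculateUpperLimits expects JSON files shaped roughly as
# sketched below (the field names follow the code above; the platform key,
# story name and numeric values are invented for illustration):
#
#   platform_specific_tags.json:
#     {"win10": {"benchmark": "desktop",
#                "tags": [{"os": "Windows-10", "buildername": "Win10 FYI Release"}]}}
#
#   representatives_frame_times_upper_limit.json:
#     {"win10": {"some_story": {"avg": 16.6, "ci_095": 0.4}}}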
if __name__ == '__main__':
sys.exit(RecalculateUpperLimits(RUNS_USED_FOR_LIMIT_UPDATE)) | bsd-3-clause |
iohannez/gnuradio | gr-filter/examples/synth_to_chan.py | 7 | 3891 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps) / nchans))
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
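    # Illustrative summary of the flowgraph wired up below: each sine source
    # feeds an NBFM transmitter, the PFB synthesizer stacks the FM channels
    # into one wideband signal, 'head' limits the run to N samples, Gaussian
    # noise is added, and the result is both recorded (snk_synth) and split
    # back into per-channel sinks by the PFB channelizer.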
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in range(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = numpy.blackman
#winfunc = numpy.hamming
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pyplot.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
breisfeld/avoplot | examples/adv_sine_wave.py | 3 | 8650 | import numpy
import matplotlib.pyplot as plt
import math
from avoplot import plugins, series, controls, subplots
from avoplot.gui import widgets
import wx
plugin_is_GPL_compatible = True
class TrigFuncSubplot(subplots.AvoPlotXYSubplot):
def my_init(self):
"""
When defining your own subplot classes, you should not need to override
the __init__ method of the base class. Instead you should define a
my_init() method which takes no args. This will be called automatically
when the subplot is created. Use this to customise the subplot to suit
        your specific needs - setting titles, axis formatters, etc.
"""
#call the parent class's my_init() method. This is not required, unless
#you want to make use of any customisation done by the parent class.
#Note that this includes any control panels defined by the parent class!
super(TrigFuncSubplot, self).my_init()
#set up some axis titles
ax = self.get_mpl_axes()
ax.set_xlabel(r'$\theta$ (radians)')
ax.set_ylabel('y')
#add the units control panel to this subplot to allow the user to change
#the x-axis units.
self.add_control_panel(TrigSubplotUnitsCtrl(self))
#set the initial name of the subplot
self.set_name("Trig. Function Subplot")
class SineWaveSeries(series.XYDataSeries):
"""
Define our own data series type for Sine data. Unlike for subplots, when
defining custom data series, we do override the __init__ method.
"""
def __init__(self, *args, **kwargs):
super(SineWaveSeries, self).__init__(*args, **kwargs)
#add a control for this data series to allow the user to change the
#frequency of the wave using a slider.
self.add_control_panel(SineWaveFreqCtrl(self))
@staticmethod
def get_supported_subplot_type():
"""
This is how we restrict which data series can be plotted into which
types of subplots. Specialised subplots may provide controls for dealing
with very specific types of data - for example, our TrigFuncSubplot
        allows the x-axis to be switched between degrees and radians; it would
therefore make no sense to allow time series data to be plotted into it.
However, it might make sense to allow a SineWaveSeries to be plotted
        into a general AvoPlotXYSubplot, and therefore this is permitted by
AvoPlot. The rule is as follows:
A data series may be plotted into a subplot if the subplot is an
instance of the class returned by its get_supported_subplot_type()
method or any of its base classes.
"""
return TrigFuncSubplot
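# Editorial note illustrating the rule stated in get_supported_subplot_type()
# above: SineWaveSeries declares TrigFuncSubplot as its supported type, so it
# may be plotted into a TrigFuncSubplot or into a plain AvoPlotXYSubplot (a
# base class of TrigFuncSubplot), while series whose supported subplot type
# lies outside that inheritance chain (e.g. time series subplots) stay out of
# the specialised trig subplots.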
class AdvExamplePlugin(plugins.AvoPlotPluginSimple):
"""
This class is the same as that used for the Sine wave example, except
that we use the SineWaveSeries data series class that we defined above
rather than the generic XYDataSeries class used before.
"""
def __init__(self):
super(AdvExamplePlugin, self).__init__("Example Plugin with Controls",
SineWaveSeries)
self.set_menu_entry(['Examples', 'Adv. Sine Wave'],
"Plot a sine wave with variable frequency")
def plot_into_subplot(self, subplot):
x_data = numpy.linspace(0, 7, 500)
y_data = numpy.sin(x_data)
data_series = SineWaveSeries("adv sine wave", xdata=x_data,
ydata=y_data)
subplot.add_data_series(data_series)
return True
def rad2deg(theta, pos):
"""
Function for converting radians to degrees for use with matplotlib's
FuncFormatter object.
"""
return '%0.2f'%math.degrees(theta)
class TrigSubplotUnitsCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for trig function subplots allowing their x axis units
to be changed from radians to degrees.
"""
def __init__(self, subplot):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(TrigSubplotUnitsCtrl, self).__init__("Units")
#store the subplot object that this control panel is associated with,
#so that we can access it later
self.subplot = subplot
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(TrigSubplotUnitsCtrl, self).setup(parent)
#create a choice box for the different units for the x axis
#we use a avoplot.gui.widgets.ChoiceSetting object which is a
#thin wrapper around a wx.ChoiceBox, but provides a label and
#automatically registers the event handler.
units_choice = widgets.ChoiceSetting(self, "x-axis units:", "Radians",
["Radians", "Degrees"],
self.on_units_change)
#add the choice widget to the control panel sizer
        self.Add(units_choice, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border=10)
def on_units_change(self, evnt):
"""
Event handler for change of x axis units events.
"""
#get the matplotlib axes object from the subplot
ax = self.subplot.get_mpl_axes()
#change the axis labels and label formatting based on the choice of
#units
if evnt.GetString() == 'Degrees':
ax.set_xlabel(r'$\theta$ (degrees)')
ax.xaxis.set_major_formatter(plt.FuncFormatter(rad2deg))
else:
ax.set_xlabel(r'$\theta$ (radians)')
ax.xaxis.set_major_formatter(plt.ScalarFormatter())
#draw our changes in the display
self.subplot.update()
class SineWaveFreqCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for sine wave data series allowing their frequency to
be changed using a slider.
"""
def __init__(self, series):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(SineWaveFreqCtrl, self).__init__("Freq.")
#store the data series object that this control panel is associated with,
#so that we can access it later
self.series = series
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(SineWaveFreqCtrl, self).setup(parent)
#create a label for the slider
label = wx.StaticText(self, wx.ID_ANY, 'Frequency')
self.Add(label, 0,
wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL,
border=10)
#create a frequency slider
self.slider = wx.Slider(self, wx.ID_ANY, value=1, minValue=1,
maxValue=30, style=wx.SL_LABELS)
#add the slider to the control panel's sizer
self.Add(self.slider, 0,
wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, border=10)
#register an event handler for slider change events
wx.EVT_COMMAND_SCROLL(self, self.slider.GetId(), self.on_slider_change)
def on_slider_change(self, evnt):
"""
Event handler for frequency slider change events.
"""
#change the frequency of the sine wave data accordingly
f = self.slider.GetValue()
x_data = numpy.linspace(0, 7, 2000)
y_data = numpy.sin(x_data * f)
#change the data in the series object
self.series.set_xy_data(xdata=x_data, ydata=y_data)
#draw our changes on the display
self.series.update()
#register the plugin with AvoPlot
plugins.register(AdvExamplePlugin())
| gpl-3.0 |
mojoboss/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
JeroenZegers/Nabu-MSSS | nabu/postprocessing/reconstructors/deepclusteringnoise_reconstructor.py | 1 | 5366 | """@file deepclusteringnoise_reconstructor.py
contains the reconstructor class using deep clustering for the modified noise architecture"""
from sklearn.cluster import KMeans
import mask_reconstructor
from nabu.postprocessing import data_reader
import numpy as np
import os
class DeepclusteringnoiseReconstructor(mask_reconstructor.MaskReconstructor):
"""the deepclusteringnoise reconstructor class for modified architecture for noise
a reconstructor using deep clustering"""
requested_output_names = ['bin_emb', 'noise_filter']
def __init__(self, conf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation=False):
"""DeepclusteringnoiseReconstructor constructor
Args:
conf: the reconstructor configuration as a dictionary
evalconf: the evaluator configuration as a ConfigParser
dataconf: the database configuration
rec_dir: the directory where the reconstructions will be stored
task: name of task
"""
super(DeepclusteringnoiseReconstructor, self).__init__(
conf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation)
if 'noise_threshold_for_kmeans' in conf:
self.noise_threshold = float(conf['noise_threshold_for_kmeans'])
else:
self.noise_threshold = 0.75
if 'min_kmeans_perc' in conf:
self.min_kmeans_perc = float(conf['min_kmeans_perc'])
else:
self.min_kmeans_perc = 0.05
# get the usedbins reader
usedbins_names = conf['usedbins'].split(' ')
usedbins_dataconfs = []
for usedbins_name in usedbins_names:
usedbins_dataconfs.append(dict(dataconf.items(usedbins_name)))
self.usedbins_reader = data_reader.DataReader(usedbins_dataconfs, self.segment_lengths)
# directory where cluster centroids will be stored
self.center_store_dir = os.path.join(rec_dir, 'cluster_centers')
if not os.path.isdir(self.center_store_dir):
os.makedirs(self.center_store_dir)
def _get_masks(self, output, utt_info):
"""estimate the masks
Args:
output: the output of a single utterance of the neural network
utt_info: some info on the utterance
Returns:
the estimated masks"""
embeddings = output['bin_emb'] # Embeddingvectors
noise_filter = output['noise_filter'] # noise filter output network (alpha)
# only the non-silence bins will be used for the clustering
usedbins, _ = self.usedbins_reader(self.pos)
[T, F] = np.shape(usedbins)
emb_dim = np.shape(embeddings)[1]/F
if np.shape(embeddings)[0] != T:
raise Exception('Number of frames in usedbins does not match the sequence length')
if np.shape(noise_filter)[0] != T:
raise Exception('Number of frames in noise detect does not match the sequence length')
if np.shape(noise_filter)[1] != F:
raise Exception('Number of noise detect outputs does not match number of frequency bins')
# reshape the embeddings vectors
embeddings = embeddings[:T, :]
embeddings_resh = np.reshape(embeddings, [T*F, emb_dim])
embeddings_resh_norm = np.linalg.norm(embeddings_resh, axis=1, keepdims=True)
embeddings_resh = embeddings_resh/embeddings_resh_norm
if np.isnan(embeddings_resh).any():
print 'Embedding reshape contains NaN'
# reshape noise filter
noise_filter = noise_filter[:T, :]
noise_filter_resh = np.reshape(noise_filter, T*F)
		# which cells do not have too much noise
no_noise = noise_filter_resh > self.noise_threshold
# only keep the active bins (above threshold) for clustering and not too noisy
usedbins_resh = np.reshape(usedbins, T*F)
filt = np.logical_and(usedbins_resh, no_noise)
perc_for_kmeans = float(np.sum(filt))/float(np.sum(usedbins_resh))
if perc_for_kmeans < self.min_kmeans_perc:
print \
				'Found that less than %.1f%% (%.1f%%) of the tf bins with energy were considered non-noise for the Kmeans. ' \
'Lowering the noise threshold so that %.1f%% of the bins will be considered' % \
(self.min_kmeans_perc*100, perc_for_kmeans*100, self.min_kmeans_perc*100)
num_bins_wanted = int(np.ceil(np.sum(usedbins_resh)*self.min_kmeans_perc))
noise_filt_used_bin = noise_filter_resh * usedbins_resh
sorted_noise_filt_used_bin_inds = np.argsort(noise_filt_used_bin)
sorted_noise_filt_used_bin_inds = sorted_noise_filt_used_bin_inds[::-1]
filt = sorted_noise_filt_used_bin_inds[:num_bins_wanted]
embeddings_speech_resh = embeddings_resh[filt]
if np.isnan(embeddings_speech_resh).any():
print 'embeddings_speech_resh contains NaN'
if np.shape(embeddings_speech_resh)[0] < 2:
return np.zeros([self.nrS, T, F])
# apply kmeans clustering and assign each bin to a cluster
kmeans_model = KMeans(n_clusters=self.nrS, init='k-means++', n_init=10, max_iter=100, n_jobs=-1)
		# Sometimes it fails due to an IndexError and it is not clear why; retry up to 5 times
for _ in range(5):
try:
kmeans_model.fit(embeddings_speech_resh)
except IndexError:
continue
break
# assign each cell to cluster
predicted_labels = kmeans_model.predict(embeddings_resh)
predicted_labels_resh = np.reshape(predicted_labels, [T, F])
# reconstruct the masks from the cluster labels
masks = np.zeros([self.nrS, T, F])
for spk in range(self.nrS):
masks[spk, :, :] = (predicted_labels_resh == spk)*noise_filter
if np.isnan(masks).any():
print 'masks contains NaN'
# store the clusters
np.save(os.path.join(self.center_store_dir, utt_info['utt_name']), kmeans_model.cluster_centers_)
return masks
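# Shape summary for _get_masks above (using the names in the code): with T
# time frames, F frequency bins, emb_dim embedding dimensions and nrS target
# speakers, 'bin_emb' arrives as [T, F*emb_dim], 'noise_filter' as [T, F],
# and the returned masks have shape [nrS, T, F]; each speaker's mask is its
# cluster assignment scaled by the noise filter output.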
| mit |
arg-hya/taxiCab | Tools/Misc/TaskPointGenerator.py | 1 | 1502 | import json
import shapefile as shp
import matplotlib.pyplot as plt
import random
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
numbersX = []
numbersY = []
TaskPoints = {}
shpFilePath = r"D:\TaxiCab\mycode\Plots\ShapefileAndTrajectory\taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
for shape in sf.shapeRecords():
#print(records[0][3])
x = [i[0] for i in shape.shape.points[:]]
meanX = mean(x)
numbersX.append(meanX)
y = [i[1] for i in shape.shape.points[:]]
meanY = mean(y)
numbersY.append(meanY)
plt.plot(x,y)
num = 0 ##range(263)
for x, y in zip(numbersX, numbersY):
plt.text(x, y, str(num), color="red", fontsize=12)
num = num + 1
plt.plot(numbersX, numbersY, 'o', color='blue', markersize=7, markeredgewidth=0.0)
#print (len(numbersX))
#print (numbersY)
plt.show()
Date_min = 1
Date_max = 30
Beta_min = 2
Beta_max = 30
#print (range(len(numbersX)))
for i in range(len(numbersX)):
date = "2017/1/"
TaskPoints_trace = []
TaskPoints_trace.append(records[i][3])
TaskPoints_trace.append(numbersX[i])
TaskPoints_trace.append(numbersY[i])
TaskPoints_trace.append(random.randint(Beta_min, Beta_max))
date += str(random.randint(Date_min, Date_max))
TaskPoints_trace.append(date)
TaskPoints[i] = TaskPoints_trace
json.dump(TaskPoints, open('Data1/TaxiZone_TaskPoints.json', 'w'), indent=4, sort_keys=True, separators=(',', ':'))
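# Each record in TaxiZone_TaskPoints.json therefore has the form
# [zone_attribute, centroid_x, centroid_y, beta, date]; for example (values
# invented for illustration): "0": ["Newark Airport", 936046.6, 191868.7, 17,
# "2017/1/12"]. The zone attribute is whatever field records[i][3] holds in
# the taxi_zones shapefile.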
| gpl-3.0 |
dalejung/edamame | edamame/tools/follow.py | 1 | 9062 | import inspect
import gc
import sys
import os.path
import difflib
from collections import OrderedDict
import pandas as pd
from pandas.core.common import in_ipnb
def is_property(code):
"""
Using some CPython gc magics, check if a code object is a property
gc idea taken from trace.py from stdlib
"""
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
gc.collect()
code_refs = gc.get_referrers(code)
funcs = [f for f in code_refs
if inspect.isfunction(f)]
if len(funcs) != 1:
return False
# property object will reference the original func
props = [p for p in gc.get_referrers(funcs[0])
if isinstance(p, property)]
return len(props) == 1
def is_class_dict(dct):
if not isinstance(dct, dict):
return False
if '__dict__' not in dct or not inspect.isgetsetdescriptor(dct['__dict__']):
return False
return True
def get_parent(code):
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
if len(funcs) != 1:
return None
refs = [f for f in gc.get_referrers(funcs[0])]
for ref in refs:
# assume if that if a dict is pointed to by a class,
# that dict is the __dict__
if isinstance(ref, dict):
parents = [p for p in gc.get_referrers(ref) if isinstance(p, type)]
if len(parents) == 1:
return parents[0].__name__
if inspect.ismethod(ref):
return ref.__qualname__.rsplit('.', 1)[0]
return None
class Follow(object):
"""
Follows execution path.
Meant as a quick way to see what a function does.
In [2]: with Follow() as f:
...: df.sum()
...:
In [3]: f.pprint(depth=2)
stat_func generic.py:3542
_reduce frame.py:3995
_get_axis_number generic.py:285
_get_agg_axis frame.py:4128
as_matrix generic.py:1938
"""
def __init__(self, depth=1, silent=False, parent=False):
self.depth = depth
self.silent = silent
self.timings = []
self.frame_cache = {}
self._caller_cache = {}
self.parent = parent
self.stack_depth_cache = {}
def current_depth(self, frame):
current_depth = None
i = 0
f = frame.f_back
while f:
i += 1
parent_depth = self.stack_depth_cache.get(id(f), None)
if parent_depth is not None:
current_depth = i + parent_depth
break
# if we're already past depth, don't bother finding real depth
if i > self.depth:
return None
f = f.f_back
# should always at least get back to base parent
return current_depth
def trace_dispatch(self, frame, event, arg):
if len(self.stack_depth_cache) == 0:
# __enter__ is the intial frame
self.stack_depth_cache[id(frame.f_back)] = 0
        # the lower parts get heavy. don't do anything for frames deeper
# than depth
current_depth = self.current_depth(frame)
if current_depth is None:
return
if current_depth > self.depth:
return
if event not in ['call', 'c_call']:
return
# skip built in funcs
if inspect.isbuiltin(arg):
return
# skip properties, we're only really interested in function calls
        # this will unfortunately skip any important logic that is wrapped
# in property logic
code = frame.f_code
if is_property(code):
return
        # note that get_parent is very slow.
parent_name = None
if self.parent:
parent_name = get_parent(code)
indent, first_parent = self.indent_level(frame)
f = frame.f_back
if event == "c_call":
func_name = arg.__name__
fn = (indent, "", 0, func_name, id(frame),id(first_parent), None)
elif event == 'call':
fcode = frame.f_code
fn = (indent, fcode.co_filename, fcode.co_firstlineno,
fcode.co_name, id(frame), id(first_parent), parent_name)
self.timings.append(fn)
def indent_level(self, frame):
i = 0
f = frame.f_back
first_parent = f
while f:
if id(f) in self.frame_cache:
i += 1
f = f.f_back
if i == 0:
# clear out the frame cache
self.frame_cache = {id(frame): True}
else:
self.frame_cache[id(frame)] = True
return i, first_parent
def to_frame(self):
data = self.timings
cols = ['indent', 'filename', 'lineno', 'func_name', 'frame_id',
'parent_id', 'parent_name']
df = pd.DataFrame(data, columns=cols)
df.loc[:, 'filename'] = df.filename.apply(lambda s: os.path.basename(s))
return df
def __enter__(self):
sys.setprofile(self.trace_dispatch)
return self
def __exit__(self, type, value, traceback):
sys.setprofile(None)
if not self.silent:
self.pprint(self.depth)
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(func) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def gen_output(self, depth=None):
df = self.to_frame()
mask = df.filename == ''
mask = mask | df.func_name.isin(['<lambda>', '<genexpr>'])
mask = mask | df.func_name.str.startswith('__')
if depth:
mask = mask | (df.indent > depth)
MSG_FORMAT = "{indent}{func_name}{class_name} <{filename}:{lineno}>"
df = df.loc[~mask]
def format(row):
indent = row[0]
filename = row[1]
lineno = row[2]
func_name = row[3]
class_name = row[6] or ''
if class_name:
class_name = '::{class_name}'.format(class_name=class_name)
msg = MSG_FORMAT.format(indent=" "*indent*4, func_name=func_name,
filename=filename, lineno=lineno,
class_name=class_name)
return msg
df = df.reset_index(drop=True)
output = df.apply(format, axis=1, raw=True)
return output.tolist()
def pprint(self, depth=None):
output = self.gen_output(depth=depth)
print(("-" * 40))
print(("Follow Path (depth {depth}):".format(depth=depth)))
print(("-" * 40))
print(("\n".join(output)))
def diff(self, right, depth):
if in_ipnb():
return self._html_diff(right=right, depth=depth)
else:
return self._text_diff(right=right, depth=depth)
def _text_diff(self, right, depth):
output = self.gen_output(depth)
output2 = right.gen_output(depth)
return '\n'.join(difflib.ndiff(output, output2))
def _html_diff(self, right, depth):
from IPython.core.display import HTML
output = self.gen_output(depth)
output2 = right.gen_output(depth)
htmldiff = difflib.HtmlDiff()
diff = htmldiff.make_table(output, output2)
return HTML(diff)
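# Editorial usage sketch (not part of the original module): trace two pandas
# calls and diff their execution paths. The traced calls are arbitrary
# examples; any Python calls could be followed instead.
if __name__ == "__main__":
    demo_df = pd.DataFrame({"a": [1, 2, 3]})
    with Follow(depth=2, silent=True) as left:
        demo_df.sum()
    with Follow(depth=2, silent=True) as right:
        demo_df.mean()
    left.pprint(depth=2)
    # Outside a notebook this returns a plain text diff of the two traces.
    print(left.diff(right, depth=2))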
| mit |
krasch/smart-assistants | examples/visualize_habits.py | 1 | 1689 | # -*- coding: UTF-8 -*-
"""
Plot visualization of user habits, i.e. show which actions typically follow some given user action.
Note: the figure for "Frontdoor=Closed" slightly deviates from Figure 1 in the paper and Figure 5.1 in the
dissertation (see paper_experiments.py for bibliographical information). The number of observed actions was reported
correctly in the paper/dissertation; however, there was an issue with ordering which actions occur most commonly,
which resulted in "Open cups cupboard" being erroneously included in the figure. Despite this issue, the main point
of the figure still stands: the user has some observable habits after closing the frontdoor.
"""
import sys
sys.path.append("..")
import pandas
from recsys.classifiers.temporal import TemporalEvidencesClassifier
from recsys.classifiers.binning import initialize_bins
from recsys.dataset import load_dataset
from evaluation import plot
import config
#configuration
data = load_dataset("../datasets/houseA.csv", "../datasets/houseA.config")
#fit classifier to dataset
cls = TemporalEvidencesClassifier(data.features, data.target_names, bins=initialize_bins(0, 300, 10))
cls = cls.fit(data.data, data.target)
#create visualizations of habits around each user action
plot_conf = plot.plot_config(config.plot_directory, sub_dirs=[data.name, "habits"], img_type=config.img_type)
for source in cls.sources.values():
observations = pandas.DataFrame(source.temporal_counts)
observations.columns = data.target_names
observations.index = cls.bins
plot.plot_observations(source.name(), observations, plot_conf)
print "Results can be found in the \"%s\" directory" % config.plot_directory
| mit |
walterreade/scikit-learn | sklearn/datasets/tests/test_base.py | 33 | 7160 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a suprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
| bsd-3-clause |
elidrc/PSO | test_pso.py | 1 | 1192 | from benchmark_functions import *
from pso import *
import matplotlib.pyplot as plt
iterations = 100
particles = 500
dimensions = 2
search_space = [[-5.12] * dimensions, [5.12] * dimensions]
# print init_pso(iterations, particles, search_space)
velocity, fitness, local_best, local_position, global_best, global_position = init_pso(iterations, particles,
search_space)
# print create_swarm(particles, search_space)
swarm = create_swarm(particles, search_space)
iteration = 0
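# Main PSO loop: evaluate every particle on the sphere benchmark, update the
# personal (local) and global bests, move the swarm, and keep the particles
# inside the search space via check_swarm.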
while iteration < iterations:
fitness = [sphere(solution) for solution in swarm]
local_best, local_position = update_local_position(swarm, fitness, local_best, local_position)
global_best, global_position = update_global_position(swarm, local_best, global_best, global_position, iteration)
swarm, velocity = update_swarm(swarm, velocity, local_position, global_position, iteration)
swarm = check_swarm(swarm, search_space)
iteration += 1
plt.plot(global_best, '.-', label='%f' % min(global_best))
plt.xlim(-1, iteration)
# plt.ylim(min(global_best), max(global_best)+0.01)
plt.legend()
plt.show()
| mit |
nmartensen/pandas | pandas/tests/scalar/test_interval.py | 7 | 4026 | from __future__ import division
from pandas import Interval
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
return Interval(0, 1)
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with tm.assert_raises_regex(TypeError, msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
def test_math_add(self, interval):
expected = Interval(1, 2)
actual = interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + interval
assert expected == actual
actual = interval
actual += 1
assert expected == actual
msg = "unsupported operand type\(s\) for \+"
with tm.assert_raises_regex(TypeError, msg):
interval + Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval + 'foo'
def test_math_sub(self, interval):
expected = Interval(-1, 0)
actual = interval - 1
assert expected == actual
actual = interval
actual -= 1
assert expected == actual
msg = "unsupported operand type\(s\) for -"
with tm.assert_raises_regex(TypeError, msg):
interval - Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval - 'foo'
def test_math_mult(self, interval):
expected = Interval(0, 2)
actual = interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * interval
assert expected == actual
actual = interval
actual *= 2
assert expected == actual
msg = "unsupported operand type\(s\) for \*"
with tm.assert_raises_regex(TypeError, msg):
interval * Interval(1, 2)
msg = "can\'t multiply sequence by non-int"
with tm.assert_raises_regex(TypeError, msg):
interval * 'foo'
def test_math_div(self, interval):
expected = Interval(0, 0.5)
actual = interval / 2.0
assert expected == actual
actual = interval
actual /= 2.0
assert expected == actual
msg = "unsupported operand type\(s\) for /"
with tm.assert_raises_regex(TypeError, msg):
interval / Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
| bsd-3-clause |
lbishal/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
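# Minimal usage sketch (added for illustration, not part of the original
# module): two identical sets of biclusters listed in different orders still
# reach a perfect consensus, because the Hungarian matching pairs them up.
if __name__ == "__main__":
    rows_a = np.array([[1, 1, 0, 0],
                       [0, 0, 1, 1]])
    cols_a = np.array([[1, 1, 0, 0],
                       [0, 0, 1, 1]])
    rows_b, cols_b = rows_a[::-1], cols_a[::-1]
    print(consensus_score((rows_a, cols_a), (rows_b, cols_b)))  # -> 1.0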
| bsd-3-clause |
rmhyman/DataScience | Lesson3/exploratory_data_analysis_subway_data.py | 1 | 1558 | import numpy as np
import pandas
import matplotlib.pyplot as plt
def entries_histogram(turnstile_weather):
'''
Before we perform any analysis, it might be useful to take a
look at the data we're hoping to analyze. More specifically, let's
examine the hourly entries in our NYC subway data and determine what
distribution the data follows. This data is stored in a dataframe
called turnstile_weather under the ['ENTRIESn_hourly'] column.
Let's plot two histograms on the same axes to show hourly
entries when raining vs. when not raining. Here's an example on how
to plot histograms with pandas and matplotlib:
turnstile_weather['column_to_graph'].hist()
    Your histogram may look similar to the bar graph in the instructor notes below.
You can read a bit about using matplotlib and pandas to plot histograms here:
http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
You can see the information contained within the turnstile weather data here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
'''
plt.figure()
#print turnstile_weather['rain'] == 1
    turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly'].hist() # histogram of hourly entries when it is not raining
    turnstile_weather[turnstile_weather['rain'] == 1]['ENTRIESn_hourly'].hist() # histogram of hourly entries when it is raining
return plt
| mit |
josiahseaman/DNAResearch | Repeat_Graph.py | 1 | 8201 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
from IPython.core.display import Image
# <codecell>
data = []
for y in range(10):
data.append([y+x for x in range(10)])
# print(data)
# IPython.display.Image expects raw image bytes or a filename, so a nested list
# cannot be rendered with it; show the 2D data with matplotlib instead.
plt.imshow(data, interpolation='nearest')
# <headingcell level=1>
# Matplot Lib
# <codecell>
alpha = 0.7
phi_ext = 2 * pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
return 2 + alpha - 2 * cos(phi_p)*cos(phi_m) - alpha * cos(phi_ext - 2*phi_p)
phi_m = linspace(0, 2*pi, 100)
phi_p = linspace(0, 2*pi, 100)
X,Y = meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*pi), Y/(2*pi), Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <codecell>
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import random
bignum = 10
data = []
for i in range(bignum):
data.append([random.random() for x in range(bignum)])
mat = np.array(data) #random.random((bignum, bignum))
X, Y = np.mgrid[:bignum, :bignum]
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
surf = ax.plot_surface(X,Y,mat)
plt.show()
# <headingcell level=2>
# Most simple Pixel Map
# <codecell>
def basic_func(x, y):
return x+y
X, Y = np.mgrid[:bignum, :bignum]
Z = basic_func(X, Y)
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <headingcell level=2>
# Basic Repeat Map
# <codecell>
raster_width = 11
seq = 'TCTCGTGAACCGTTCTTTCCCGCGGACGTGATGGATGTGGGTGCCTTTATTTGCGACGATATGGTCCGTAAATTAGGTCTCGTTGTTTGTACCCTCTCACTTGGCCGCTTCAACTTTTTTCCGATAATGTCTAATGCACCGACGGAATTATTGTACAGAGTAGCAAGCTCAGGTTGCACGGCAGACCTTGCCGCGTCGGGTCTGCGCACCACCCAAATCTGGGCGCGTCTGGGCCTCGCTGCTACACTGGTTAACCATGCTTCAGACTCTGTGACGATGAAATATCCAAACGACGTTGAATAAAAACGACGGGGAGCGGCGGTGATTTTTATCAATCGCGGTGAAGCAGTTATGCTCGACATCTATTAACAACAGGAGAAAGGCGCCACCGCTCCGGTGTATTATACACTGGGCCGTTTGACCGTCTCATCGACGGGCAACATGACCAAACCGCACATGCATTTCTCGGGCCGAATCGCCCGCGCCTACTGGAAAGCCGGCTCTGGCGATTATGCCGATTTTGAAAGTTTTCTTTCATCCAAAGCGTATATTATTCAGTTTCAAATATCGACCTTGCTGAAGAAAATCAAAGTGATCTTTTTATTTAAAGGCAATGATGGCGATGTACTTAATCGTGCGATCGCTTTGCGGCAGGGCCCCCGTTGGAATAGATTTGATATGCAGGAGCTGTATCCGATCTGGCATATTCTGTCCAATTAACAGCGCAATATCAACGCTGCGCTGTCTCTGCTGGTCGGCGAACACGGACTGATTCAGTCTCCTTTGGCAGGTTTCGTACAAGGTACCACGCTGAGCGCCCTGGGCCAACGGGACTTTGCACTGCGTAAGGACGCAGTGGAAGTGGGCTCCCTGAACCCTGAAGCCGGTGAAGACAAACGTACGACCATCATCTTTACCTATGTACTGCAGCAGCAAGGTTACAAATCCGGTAAATGTTGCGGCGAGGATAAATATGACGTTATTCTGAAAGAAGGGATTATCTACTATACCGTAGTTCTGATCATCCGGGGCTTCAAAGATTCAGACAAGGACGAAGATGACGGACTTAAACATGCGCTTGAAGGATTCGAAGGCGAACGTGGCGCTGCTCTGTCGACTGTAGCATCCGCGTCCGCATGGAGGAGTGGTCAACATAACGGCACCACCCCTTCGTCAAAGGTGGCGCAAGAACTCCGCCAGAAACGCTGCAATTCCAATACAAACATCACCTGCCCACACGTAAACCTTGAACTTAACAAGATATATCGGCTCTTCCCGCTCCAAAACTAAAAGATACCGGACGTGATCGCGATCAGAGGCAAATACTTGACTCATAAGCTGTCAACGGTTGATTTACTGGGTTTTTCTCCGCCAACCTGTCTGCGCTTGCATGATTATGAAGCCGTGTCAGATCCGATGAAAGTGGCGAATTTCCATAACCAGATGGGTTTCTTGGTAGGCGATGCCATCTTCGTTCAGGAACTCATCAAACAGACGGTCGCGCTGATCATTAACAAAGTAAAAAACCCTGGTGGCCTGAAACAGCGAGCCTCAGAAAAACCGAACTCTCAGCTAGTTTGAGGTGGGTCTAATCATGAGCCAGCACTGCGCGACCGTGGGTCTCGTATTCTGGGTGAGCGCGTGCGTGACGATATTCTGTATCTTGTTAACATGGGTTTTAAACATTCGTTCTTGGCTGACCGTGTCATCATGATCAAGATTGAAGAAGAGCTGCATTTTCATACCCAGAGCTACGAGGTCACCTCGCTCGGACAGGGGGTCAGTAATTACCTGGTCACAGCCGATGCGAAAGCCCCAAAACGTCGCCAACTGGCATATCATCTTGGTACTGGGTTCTCATCATTCTACGCTGGGGCGGATGATCAGGCGTCGCGCGTGGAAGTCAAACAGATGCAACGGATCCTGATTGCAGCCGCCCTGCCGGGCCTCCGAAAGAAATTGCGCCTGGATGCACACAATGAATTTATTGTCCCAATCATGACCGAGTTCGACCAGACCGGCCCCTTAACCTTAGGCTACGCATCAGAAAAACGCGCGCTCGATAACATCATGGTGAGTCAGGATTCTGTGCTGGGGAATCTCTTTATGAAATTTTTAGGTGTGCTGGTGGTCGGTATCAGCCGGACAGCGATAGCGGACCCAGATAAGTATATGGCTATTCTGCTGGGTGCGGTTTTCGACATGCTGGCGATGAAAATCATTGAAGTCTTAGATGTTACGTCCAACCGCAACTATTTGACCAATCGCCGTACGACGGAAATCGCAGCTGTGGCAGAAACCTGTGAGGACGGAGCGTTTGTGATGCTGCTGACCACGTGGCTGGGCAAGAAGTCGGATTCCCTGAAGTTCCCTAACTTAGTGATTGTCTATTATATAGTTATGGTCGGCGGCCCGTGCACCGGAGAGCAGCAGAAACGTGCTACAGCAGCCATGAGTAGCGAAATTGCGCTCCAGCCGTATTTCCGCTTCCGCCGGATTGAGCACACTGTCCGCGGCCGCGTCTTTTGACTGGAAAAAAGTTTCGGCGAAGACGCCGGCGATAATCTGGTCTCCAACAAAACCAAACGTCGCGGTAAAGGGCCGCAGTTTAAATATGTGGAACTGGCAGAACTGACCTTAATCAAGCTGTCGATTTGAGGCGGTGTAGCTAACATGGGAGGTAATGCACGTCATGGAATGAAAGGCATTCTGGGTCCGCTGCGCGTTGCCTCTTTAGCTTATCAGGCGAAAGGTGTCATCGGTTTATCTATGTTAAAAAACTGGGCTCCGGCCTAACAAAAAAATCTGCTGTCAGTTGCTGTACTGGTCCCGCTGAGCGCGAGCACAGGGAGCGCCCTGGAAATGGTGCGCGGTCTGAAAGAAGGCAACGCAGTCTTGGTGGCGAAGATGGGGATCGCCAAAGGAGCGACAGGTCGCTGGGCGGCTGTGGCAGATGGTAACGTCGCACCTCCGCTTCGCGAGCAATTAAACTTTCAGGCT'
# <codecell>
seq = 'CTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTACTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGC'
# <codecell>
#seq[1*x_size : 1*x_size + raster_width]
seq[7:15]
# <codecell>
sum([True, False, True,True])
# <codecell>
raster_width = 11
x_size = 75 # frequency range
y_size = int(len(seq) / raster_width) # number of lines: (cut off the end)
raster_width
# <codecell>
def repeat_score(x, y):
start_str = seq[y*raster_width : (y+1)*raster_width]
target_str = seq[y*raster_width + x : (y+1)*raster_width + x]
actual_width = min(len(start_str), len(target_str))
return sum([start_str[i] == target_str[i] for i in range(actual_width)])
# <codecell>
[[repeat_score(x,y) for x in range(1,x_size-1)] for y in range(y_size-10)]
# <codecell>
X, Y = np.mgrid[:x_size, :y_size]
Z = np.array([[repeat_score(x,y) for x in range(1,x_size+1)] for y in range(y_size)]).T
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z,
cmap=cm.Greys_r,
vmin=0, vmax=raster_width)
cb = fig.colorbar(p, ax=ax)
# <codecell>
x, y = 20, 7
start_str = seq[y*x_size : y*x_size + raster_width]
target_str = seq[y*x_size + x : y*x_size + raster_width + x]
print(start_str)
print(target_str)
sum([start_str[i] == target_str[i] for i in range(raster_width)])
# <headingcell level=3>
# Notes
# <markdowncell>
# Most of the trouble I had making this came from being unfamiliar with NumPy arrays and matplotlib. The lines for Z and p = ax.pcolor(X, Y, Z, cmap=cm.Greys_r, vmin=0, vmax=raster_width) are very sensitive. The good and the bad of having a graphing platform is that I get scaled axes for free, but it will often squish the pixels. I prefer square pixels. I need to figure out how to generate a highly non-square graph, since the Repeat Map is usually 25 wide x 200 high.
# <headingcell level=1>
# Finished Product
# <codecell>
from Sequence_Utils import debugSequence, weighted_sequence
# <codecell>
class RepeatMap():
def __init__(self, sequence):
self.seq = sequence
self.raster_width = 11
self.x_size = 25 # frequency range
self.y_size = int(len(self.seq) / self.raster_width) # number of lines: (cut off the end)
def repeat_score(self, x, y):
start_str = self.seq[y*self.raster_width : (y+1)*self.raster_width]
target_str = self.seq[y*self.raster_width + x : (y+1)*self.raster_width + x]
actual_width = min(len(start_str), len(target_str))
return sum([start_str[i] == target_str[i] for i in range(actual_width)])
def render(self):
X, Y = np.mgrid[:self.x_size, :self.y_size]
Z = np.array([[self.repeat_score(x,y) for x in range(1,self.x_size+1)] for y in range(self.y_size)]).T
fig, ax = plt.subplots()
fig.set_size_inches(self.x_size /10, self.y_size /10)
p = ax.pcolor(X, Y, Z,
cmap=cm.Greys_r,
vmin=0, vmax=self.raster_width)
cb = fig.colorbar(p, ax=ax)
plt.gca().invert_yaxis()
# <codecell>
rp = RepeatMap(debugSequence(25, 200, 5))
rp.render()
# <codecell>
| apache-2.0 |
CharlesGulian/Deconv | fits_tools_tesla.py | 1 | 5575 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 21:18:54 2016
@author: charlesgulian
"""
import os
#os.chdir('/Users/annepstein/Work/Deconv')
curr_dir = os.getcwd()
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
#from photutils import aperture_photometry
#from photutils import CircularAperture
def binImage(pixelArray,M=3,N=3):
'''
- Bins pixels along image axes into MxN bins (default MxN = 3x3)
'''
pixels = pixelArray
imgDim1,imgDim2 = np.shape(pixels)
xBinSize,yBinSize = float(imgDim1)/float(M),float(imgDim2)/float(N)
imgBinDict = {} # Dictionary for storing
#print xBinSize,yBinSize
for i in range(M):
for j in range(N):
imgBinDict[i+1,j+1] = pixels[int(np.ceil(i*xBinSize)):int(np.floor((i+1)*xBinSize)),\
int(np.ceil(j*yBinSize)):int(np.floor((j+1)*yBinSize))]
#print ''
#print 'Bin: ',i,j
#print 'Shape: ',np.shape(imgBinDict[i,j])
return imgBinDict
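# Illustrative example (not part of the original module): bin a 6x6 array into
# a 3x3 grid of equally sized blocks; keys are 1-based, so bin (1,1) is the
# top-left block.
#
#   demo = np.arange(36).reshape(6, 6)
#   bins = binImage(demo, M=3, N=3)
#   bins[1, 1].shape   # -> (2, 2)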
def computeObjectFlux(x0,y0,radius,image):
'''
- Compute total flux within circular aperture of the given radius
from source at image coordinates (x0,y0)
'''
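    # NOTE: relies on photutils (CircularAperture / aperture_photometry), whose
    # imports are commented out at the top of this module.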
position = (x0,y0)
aperture = CircularAperture(position,r=radius)
return aperture_photometry(image,aperture)[0][0]
# getPixels() can be replaced by fits.getdata() (I did not know this)
def getPixels(image_file,delete=False):
hdulist = fits.open(image_file)
data = hdulist[0].data
hdulist.close()
if delete:
del hdulist[0].data
return data
def applyMask(image,mask):
'''
- Apply a binary mask to an array
'''
masked_image = np.multiply(image,mask)
return masked_image
def maskImage(image_file,mask_file,masked_image_file=None,Return=False):
'''
- Takes a .fits image file and .fits binary mask file as input
- Applies binary mask to .fits image data
- Rewrites masked image to new .fits file (masked_image_file)
'''
image = fits.getdata(image_file)
mask = fits.getdata(mask_file)
masked_image = applyMask(image,mask)
inds = np.where(masked_image == 0.0)
masked_image[inds] += 1e-12 # Prevent NaNs
if masked_image_file == None:
masked_image_file = image_file.replace('.fits','_masked.fits').replace('Good','MaskedImages').replace('Bad','MaskedImages')
fits.writeto(masked_image_file,masked_image,fits.getheader(image_file),clobber=True)
if Return:
return masked_image
def shift_image(image,x_offset,y_offset):
# Shifts image pixels from (x,y) to (x-x_offset),(y-y_offset)
dims = np.shape(image) # Image dimensions
dim1,dim2 = dims[0],dims[1]
blank = np.zeros(dims) + 1e-8 # Define blank array to receive new image data
shifted_image = blank
dy,dx = x_offset,y_offset # These are intentionally reversed
for i in range(dim1):
for j in range(dim2):
if (i+dx < dim1) and (i+dx >= 0) and (j+dy < dim2) and (j+dy >= 0):
                shifted_image[i,j] = image[i+dx,j+dy] # NumPy indexes arrays as [row, column] = [y, x], so the swapped offsets apply x_offset to columns and y_offset to rows
return shifted_image
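# Illustrative example (not part of the original module): with x_offset=1 the
# content moves one pixel toward smaller x, and the vacated border keeps the
# 1e-8 background value.
#
#   shifted = shift_image(np.eye(5), 1, 0)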
def subtractBias(image_file,new_image_file=None,bias=0.0,Return=False):
'''
- Takes a .fits image file as input
- Subtracts median from image data, writes new data to new image file (new_image_file)
'''
if new_image_file == None:
new_image_file = image_file
image = fits.getdata(image_file)
image -= bias
fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
if Return:
return image
def subtractMedian(image_file,new_image_file=None,Return=False):
'''
- Takes a .fits image file as input
- Subtracts median from image data, writes new data to new image file (new_image_file)
'''
if new_image_file == None:
new_image_file = image_file
image = fits.getdata(image_file)
image -= np.median(image)
fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
if Return:
return image
def write_pixel_offset(x_offset,y_offset,image_file,new_image_file=None):
# Add (x,y) pixel offset to .FITS header of an image
header = fits.getheader(image_file) # Get original .FITS header
header['x_offset'] = x_offset # Add new keywords and values to header
header['y_offset'] = y_offset
# If no new image file specified, default writes new header to original image header
if new_image_file == None:
new_image_file = image_file
# Write header to new image
fits.writeto(new_image_file,fits.getdata(image_file),header,clobber=True)
'''
# Testing:
test_image_file = 'AstroImages/Good/fpC-6484-x4078-y134_stitched_alignCropped.fits'
test_image = fits.getdata(test_image_file)
catalog = 'Results/fpC-6484-x4078-y134_stitched_alignCropped_fpC-6484-x4078-y134_stitched_alignCropped_compare.cat'
import sex_stats
fig = sex_stats.data(catalog)
x = fig.get_data('X_IMAGE')
y = fig.get_data('Y_IMAGE')
xlow = np.where(x > 651.0)
xhigh = np.where(x < 658.9)
xin = np.intersect1d(xlow,xhigh)
ylow = np.where(y > 820.0)
yhigh = np.where(y < 826.0)
yin = np.intersect1d(ylow,yhigh)
obj = np.intersect1d(xin,yin)
DATA = fig.Data
x,y = 848.39102,727.23274
radius = 10.
flux = computeObjectFlux(x,y,radius,test_image)
print flux
#testMask = 'AstroImages/Masks/fpC-6484-x4078-y134_stitched_alignCropped_mask.fits'
#maskImage(testImage,testMask)
'''
| gpl-3.0 |
gdl-civestav-localization/cinvestav_location_fingerprinting | experimentation/__init__.py | 1 | 1691 | import os
import cPickle
import matplotlib.pyplot as plt
from datasets import DatasetManager
def plot_cost(results, data_name, plot_label):
plt.figure(plot_label)
plt.ylabel('Accuracy (m)', fontsize=30)
plt.xlabel('Epoch', fontsize=30)
plt.yscale('symlog')
plt.tick_params(axis='both', which='major', labelsize=20)
plt.grid(True)
for i in range(1, 2, 1):
y, x = zip(*results[i][data_name])
name = results[i]['Name']
plt.plot(x, y, label=name, linewidth=5.0)
plt.legend(fontsize='xx-large')
def get_metrics(test_set_y, predicted_values, model_name):
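    # NOTE: placeholder implementation -- it currently only prints the
    # predicted values; test_set_y and model_name are not used yet.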
for i in xrange(len(predicted_values)):
print predicted_values[i][1]
if __name__ == '__main__':
"""
seed = 50
with open(os.path.join('experimentation', 'cinvestav_testbed_experiment_results_' + str(seed)), 'rb') as f:
results = cPickle.load(f)
plot_cost(
results=results,
data_name='cost_train',
plot_label='Cost on train phase')
plot_cost(
results=results,
data_name='cost_valid',
plot_label='Cost on valid phase')
plot_cost(
results=results,
data_name='cost_test',
plot_label='Cost on test phase')
plt.show()
"""
seed = 50
dataset, result = DatasetManager.read_dataset2('test_cleaned_dataset.csv', shared=True, seed=seed)
with open(os.path.join('trained_models', 'Logistic Regressionbrandeis_university.save'), 'rb') as f:
model = cPickle.load(f)
predicted_values = model.predict(dataset)
get_metrics(
test_set_y=result,
predicted_values=predicted_values,
model_name='Logistic Regression'
)
| gpl-3.0 |
Newsrecommender/newsrecommender | ArticleRecommendationProject/Recommendation/Collab_Content_Based.py | 1 | 5856 | import yaml
import pandas as pd
import numpy as np
import sys
import os
from math import sqrt
import matplotlib
import matplotlib.pyplot as plot
import networkx as nx
def get_script_directory():
"""
    This function returns the directory of the script when run in script mode.
    In interactive mode it returns the interpreter's location instead.
"""
path = os.path.realpath(sys.argv[0])
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
def similarity_score(Article1,Article2):
"""
    This function calculates a similarity score between two articles as 1/(1 + Euclidean distance) over their commonly rated items
"""
both_viewed = {}
for item in dataset[Article1]:
if item in dataset[Article2]:
both_viewed[item] = 1
# The Conditions to check if they both have common rating items
if len(both_viewed) == 0:
return 0
# Finding Euclidean distance
sum_of_euclidean_distance = []
for item in dataset[Article1]:
if item in dataset[Article2]:
sum_of_euclidean_distance.append(pow(dataset[Article1][item] - dataset[Article2][item], 2))
sum_of_euclidean_distance = sum(sum_of_euclidean_distance)
#print (sum_of_euclidean_distance)
return 1/(1+sqrt(sum_of_euclidean_distance))
def pearson_correlation(Article1,Article2):
"""
This function calculates Pearson correlation between two vectors
"""
both_rated = {}
for item in dataset[Article1]:
if item in dataset[Article2]:
both_rated[item] = 1
number_of_ratings = len(both_rated)
# Checking for number of ratings in common
if number_of_ratings == 0:
return 0
# Add up all the preferences of each user
person1_preferences_sum = sum([dataset[Article1][item] for item in both_rated])
person2_preferences_sum = sum([dataset[Article2][item] for item in both_rated])
# Sum up the squares of preferences of each user
person1_square_preferences_sum = sum([pow(dataset[Article1][item],2) for item in both_rated])
person2_square_preferences_sum = sum([pow(dataset[Article2][item],2) for item in both_rated])
# Sum up the product value of both preferences for each item
product_sum_of_both_users = sum([dataset[Article1][item] * dataset[Article2][item] for item in both_rated])
# Calculate the pearson score
numerator_value = product_sum_of_both_users - (person1_preferences_sum*person2_preferences_sum/number_of_ratings)
denominator_value = sqrt((person1_square_preferences_sum - pow(person1_preferences_sum,2)/number_of_ratings) * (person2_square_preferences_sum -pow(person2_preferences_sum,2)/number_of_ratings))
if denominator_value == 0:
return 0
else:
r = numerator_value/denominator_value
return r
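# Illustrative example (not part of the original script): `dataset` maps each
# article to its per-user ratings, e.g.
#   dataset = {'A1': {'u1': 5.0, 'u2': 3.0, 'u3': 1.0},
#              'A2': {'u1': 4.0, 'u2': 2.5, 'u3': 1.5}}
# pearson_correlation('A1', 'A2') correlates the ratings of the users both
# articles share, while similarity_score('A1', 'A2') returns
# 1 / (1 + Euclidean distance) over those same users.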
def find_most_similar_objects(Article1,number_of_users):
    # Rank all other articles by Pearson correlation and return the single most similar one (taken from the top number_of_users candidates).
scores = [(pearson_correlation(Article1,other_person),other_person) for other_person in dataset if other_person != Article1 ]
# Sort the similar persons so that highest scores person will appear at the first
scores.sort()
scores.reverse()
return (scores[0:number_of_users][0][1])
def get_recommendations(objects, no_of_recommendations):
"""
This function generates recommendations for specified object
"""
recommended_articles = []
input_articles = []
for article in objects:
# print (article, find_most_similar_objects(article,2)[0][1], find_most_similar_objects(article,2)[1][1])
input_articles.append(article)
recommended_articles.append(find_most_similar_objects(article,no_of_recommendations))
return input_articles,recommended_articles
# Find the path of script
path = get_script_directory()
print ('Script is located at {}'.format(path))
os.chdir(path)
# import config files
print("Reading configuration")
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
user_ratings_files_path = cfg['project_test_conf']['ratings_file_path']
user_ratings_csv_filename = cfg['project_test_conf']['ratings_file_name']
articles_files_path = cfg['project_test_conf']['articles_file_path']
articles_csv_filename = cfg['project_test_conf']['articles_file_name']
ratings_index = cfg['project_test_conf']['ratings_index_column']
output_file_path = cfg['project_test_conf']['output_path']
output_file_name = cfg['project_test_conf']['output_file_name']
ratings_file = os.path.join(user_ratings_files_path, user_ratings_csv_filename)
articles_file = os.path.join(articles_files_path, articles_csv_filename)
Output_Recommendations = os.path.join(output_file_path, output_file_name)
print("Configuration loaded successfully")
print ('Reading ratings from file {}'.format(ratings_file))
user_ratings = pd.read_csv(ratings_file, index_col=ratings_index)
articles_db = pd.read_csv(articles_file, index_col=ratings_index)
objects_list = list(user_ratings.index)
user_ratings_T = user_ratings.transpose()
dataset = user_ratings_T.to_dict()
# Get recommendations
print('Calculations in progress...')
Article, recommended_article = get_recommendations(objects_list, 5)
print('Calculations completed.')
# Create output files
print('Creating output file')
recommended_article_title = []
for content in recommended_article:
recommended_article_title.append(articles_db.Title[content])
input_article_title = []
for content in Article:
input_article_title.append(articles_db.Title[content])
df = pd.DataFrame()
df['Article'] = Article
df['Recommendation'] = recommended_article
df['News'] = input_article_title
df['Recommended_News'] = recommended_article_title
df = df.set_index('Article', drop=True, append=False, inplace=False, verify_integrity=False)
df.to_csv(Output_Recommendations)
print('Output file created.')
print('Check output files at {}'.format(Output_Recommendations))
| mit |
mrcslws/htmresearch | projects/feedback/feedback_sequences_additional.py | 7 | 24229 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file runs a number of experiments testing the effectiveness of feedback
with noisy inputs.
"""
import os
from copy import deepcopy
import numpy
import cPickle
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats
matplotlib.rcParams['pdf.fonttype'] = 42
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
import feedback_experiment
from feedback_experiment import FeedbackExperiment
def convertSequenceMachineSequence(generatedSequences):
"""
Convert a sequence from the SequenceMachine into a list of sequences, such
that each sequence is a list of set of SDRs.
"""
sequenceList = []
currentSequence = []
for s in generatedSequences:
if s is None:
sequenceList.append(currentSequence)
currentSequence = []
else:
currentSequence.append(s)
return sequenceList
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2,
sharedRange=None, seed=42):
"""
Generate high order sequences using SequenceMachine
"""
# Lots of room for noise sdrs
patternAlphabetSize = 10*(sequenceLength * sequenceCount)
patternMachine = PatternMachine(n, w, patternAlphabetSize, seed)
sequenceMachine = SequenceMachine(patternMachine, seed)
numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength,
sharedRange=sharedRange )
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
return sequenceMachine, generatedSequences, numbers
def sparsenRange(sequenceMachine, sequences, startRange, endRange, probaZero):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p < endRange and p >= startRange:
newsdr = numpy.array(list(sdr))
keep = numpy.random.rand(len(newsdr)) > probaZero
newsdr = newsdr[keep==True]
newSequence.append(set(newsdr))
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def crossSequences(sequenceMachine, sequences, pos):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos:
newSequence.append(sequences[(numseq +1) % len(sequences)][p])
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addTemporalNoise(sequenceMachine, sequences, noiseStart, noiseEnd, noiseProba):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= noiseStart and p < noiseEnd:
newsdr = patternMachine.addNoise(sdr, noiseProba)
newSequence.append(newsdr)
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addPerturbation(sequenceMachine, sequences, noiseType, pos, number=1):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos and p < pos+number:
if noiseType == "skip":
pass
elif noiseType == "replace":
newsdr = patternMachine.addNoise(sdr, 1.0)
newSequence.append(newsdr)
elif noiseType == "repeat":
newSequence.append(s[p-1])
else:
raise("Unrecognized Noise Type!")
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def runInference(exp, sequences, enableFeedback=True, apicalTiebreak=True,
apicalModulationBasalThreshold=True, inertia=True):
"""
Run inference on this set of sequences and compute error
"""
if enableFeedback:
print "Feedback enabled: "
else:
print "Feedback disabled: "
error = 0
activityTraces = []
responses = []
for i,sequence in enumerate(sequences):
(avgActiveCells, avgPredictedActiveCells, activityTrace, responsesThisSeq) = exp.infer(
sequence, sequenceNumber=i, enableFeedback=enableFeedback, apicalTiebreak=apicalTiebreak,
apicalModulationBasalThreshold=apicalModulationBasalThreshold, inertia=inertia)
error += avgActiveCells
activityTraces.append(activityTrace)
responses.append(responsesThisSeq)
print " "
error /= len(sequences)
print "Average error = ",error
return error, activityTraces, responses
def runExp(noiseProba, numSequences, nbSeeds, noiseType, sequenceLen, sharedRange, noiseRange, whichPlot, plotTitle):
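  """
  Run the feedback-under-noise experiment: for each seed, learn the generated
  sequences, corrupt them with the requested noise type, then run inference
  with feedback, without feedback, and with each feedback mechanism ablated
  (no apical tiebreak, no basal-threshold modulation, no inertia). Plots the
  average prediction performance per condition, saves it as <plotTitle>.png,
  and returns the per-condition prediction/correct correlation traces.
  """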
allowedNoises = ("skip", "replace", "repeat", "crossover", "pollute")
if noiseType not in allowedNoises:
raise(RuntimeError("noiseType must be one of the following: ".join(allowedNoises)))
meanErrsFB = []; meanErrsNoFB = []; meanErrsNoNoise = []
stdErrsFB = []; stdErrsNoFB = []; stdErrsNoNoise = []
meanPerfsFB = []; stdPerfsFB = []
meanPerfsNoFB = []; stdPerfsNoFB = []
stdsFB = []
stdsNoFB=[]
activitiesFB=[]; activitiesNoFB=[]
diffsFB = []
diffsNoFB = []
overlapsFBL2=[]; overlapsNoFBL2=[]
overlapsFBL2Next=[]; overlapsNoFBL2Next=[]
overlapsFBL4=[]; overlapsNoFBL4=[]
overlapsFBL4Next=[]; overlapsNoFBL4Next=[]
corrsPredCorrectFBL4=[]; corrsPredCorrectNoFBL4=[]
diffsFBL4Pred=[]; diffsNoFBL4Pred=[]
diffsFBL4PredNext=[]; diffsNoFBL4PredNext=[]
diffsFBL2=[]; diffsNoFBL2=[]
diffsFBL2Next=[]; diffsNoFBL2Next=[]
diffsNoAT = []; overlapsNoATL2=[]; overlapsNoATL2Next=[]; overlapsNoATL4=[]
overlapsNoATL4Next=[]
corrsPredCorrectNoATL4=[]; diffsNoATL4Pred=[]; diffsNoATL4PredNext=[]
diffsNoATL2=[]; diffsNoATL2Next=[]
diffsNoAM = []; overlapsNoAML2=[]; overlapsNoAML2Next=[]; overlapsNoAML4=[]
overlapsNoAML4Next=[]
corrsPredCorrectNoAML4=[]; diffsNoAML4Pred=[]; diffsNoAML4PredNext=[]
diffsNoAML2=[]; diffsNoAML2Next=[]
diffsNoIN = []; overlapsNoINL2=[]; overlapsNoINL2Next=[]; overlapsNoINL4=[]
overlapsNoINL4Next=[]
corrsPredCorrectNoINL4=[]; diffsNoINL4Pred=[]; diffsNoINL4PredNext=[]
diffsNoINL2=[]; diffsNoINL2Next=[]
errorsFB=[]; errorsNoFB=[]; errorsNoNoise=[]
perfsFB = []; perfsNoFB = []
#for probaZero in probaZeros:
seed = 42
for seedx in range(nbSeeds):
seed = seedx + 123
    profile = False
L4Overrides = {"cellsPerColumn": 8}
numpy.random.seed(seed)
# Create the sequences and arrays
print "Generating sequences..."
sequenceMachine, generatedSequences, numbers = generateSequences(
sequenceLength=sequenceLen, sequenceCount=numSequences,
sharedRange=sharedRange,
seed=seed)
sequences = convertSequenceMachineSequence(generatedSequences)
noisySequences = deepcopy(sequences)
# Apply noise to sequences
noisySequences = addTemporalNoise(sequenceMachine, noisySequences,
noiseStart=noiseRange[0], noiseEnd=noiseRange[1],
noiseProba=noiseProba)
# *In addition* to this, add crossover or single-point noise
if noiseType == "crossover":
noisySequences = crossSequences(sequenceMachine, noisySequences,
pos=sequenceLen/2)
elif noiseType in ("repeat", "replace", "skip"):
noisySequences = addPerturbation(sequenceMachine, noisySequences,
noiseType=noiseType, pos=sequenceLen/2, number=1)
inferenceErrors = []
#Setup experiment and train the network on sequences
print "Learning sequences..."
exp = FeedbackExperiment(
numLearningPasses= 2*sequenceLen, # To handle high order sequences
seed=seed,
L4Overrides=L4Overrides,
)
exp.learnSequences(sequences)
print "Number of columns in exp: ", exp.numColumns
print "Sequences learned!"
# Run inference without any noise. This becomes our baseline error
standardError, activityNoNoise, responsesNoNoise = runInference(exp, sequences)
inferenceErrors.append(standardError)
runError, activityFB, responsesFB = runInference(
exp, noisySequences, enableFeedback=True)
runError, activityNoFB, responsesNoFB = runInference(
exp, noisySequences, enableFeedback=False)
runError, activityNoAT, responsesNoAT = runInference(
exp, noisySequences, enableFeedback=True, apicalTiebreak=False)
runError, activityNoAT, responsesNoAM = runInference(
exp, noisySequences, enableFeedback=True, apicalModulationBasalThreshold=False)
runError, activityNoIN, responsesNoIN = runInference(
exp, noisySequences, enableFeedback=True, inertia=False)
# Now that actual processing is done, we compute various statistics and plot graphs.
seqlen = len(noisySequences[0])
sdrlen = 2048 * 8 # Should be the total number of cells in L4. Need to make this more parametrized!
for numseq in range(len(responsesNoNoise)):
diffsFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAT.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoATL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAM.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoAML4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoIN.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoINL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
cpcfb = []; cpcnofb=[]; cpcnoat=[]; cpcnoam=[]; cpcnoin=[];
for x in range(seqlen):
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcfb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnofb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAT[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoat.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAM[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoam.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoIN[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoin.append(numpy.corrcoef(z1, z2)[0,1])
# Note that the correlations are appended across all seeds and sequences
corrsPredCorrectNoFBL4.append(cpcnofb[1:])
corrsPredCorrectNoATL4.append(cpcnoat[1:])
corrsPredCorrectNoINL4.append(cpcnoin[1:])
corrsPredCorrectNoAML4.append(cpcnoam[1:])
corrsPredCorrectFBL4.append(cpcfb[1:])
# diffsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
print "Size of L2 responses (FB):", [len(responsesFB[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L2 responses (NoNoise):", [len(responsesNoNoise[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (FB):", [len(responsesFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoFB):", [len(responsesNoFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAT):", [len(responsesNoAT[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAM):", [len(responsesNoAM[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoIN):", [len(responsesNoIN[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoNoise):", [len(responsesNoNoise[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 predictions (FB):", [len(responsesFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoFB):", [len(responsesNoFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAT):", [len(responsesNoAT[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAM):", [len(responsesNoAM[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoIN):", [len(responsesNoIN[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoNoise):", [len(responsesNoNoise[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "L2 overlap with current (FB): ", overlapsFBL2[-1]
print "L4 overlap with current (FB): ", overlapsFBL4[-1]
print "L4 overlap with current (NoFB): ", overlapsNoFBL4[-1]
print "L4 correlation pred/correct (FB): ", corrsPredCorrectFBL4[-1]
print "L4 correlation pred/correct (NoFB): ", corrsPredCorrectNoFBL4[-1]
print "L4 correlation pred/correct (NoAT): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoAM): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoIN): ", corrsPredCorrectNoATL4[-1]
print "NoNoise sequence:", [list(x)[:2] for x in sequences[numseq]]
print "Noise sequence:", [list(x)[:2] for x in noisySequences[numseq]]
print "NoNoise L4 responses:", [list(x)[:2] for x in responsesNoNoise[numseq]['L4Responses']]
print "NoFB L4 responses:", [list(x)[:2] for x in responsesNoFB[numseq]['L4Responses']]
print ""
plt.figure()
allDataSets = (corrsPredCorrectFBL4, corrsPredCorrectNoFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
allmeans = [numpy.mean(x) for x in allDataSets]
allstds = [numpy.std(x) for x in allDataSets]
nbbars = len(allmeans)
plt.bar(2*(1+numpy.arange(nbbars))-.5, allmeans, 1.0, color='r', edgecolor='none', yerr=allstds, capsize=5, ecolor='k')
for nn in range(1, nbbars):
plt.vlines([2, 2 +2*nn], 1.2, 1.2+(nn/10.0), lw=2); plt.hlines(1.2+(nn/10.0), 2, 2+2*nn, lw=2)
pval = scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(allDataSets[nn]).ravel())[1]
if pval > 0.05:
pvallabel = ' o' #r'$o$'
elif pval > 0.01:
pvallabel = '*'
elif pval > 0.001:
pvallabel = '**'
else:
pvallabel = '***'
plt.text(3, 1.2+(nn/10.0)+.02, pvallabel, fontdict={"size":14})
plt.xticks(2*(1+numpy.arange(nbbars)), ('Full', 'No\nFB', 'No Earlier\nFiring', 'No Thresold\nModulation', 'No Slower\nDynamics'))
plt.ylabel("Avg. Prediction Performance");
plt.title(plotTitle)
plt.savefig(plotTitle+".png")
# scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(corrsPredCorrectNoATL4).ravel())
plt.show()
return (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4, corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
if __name__ == "__main__":
plt.ion()
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, no shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, no shared range")
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, shared range")
#
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, no shared range")
| agpl-3.0 |
tracierenea/gnuradio | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
jakevdp/klsh | klsh/hamming_ann.py | 1 | 6489 | """
This is a set of classes to perform fast (approximate) nearest neighbors
searches over Hamming spaces.
[1] M. Charikar. Similarity Estimation Techniques from Rounding Algorithms.
ACM Symposium on Theory of Computing, 2002.
"""
__all__ = ["HammingANN", "HammingBrute", "HammingBallTree"]
import numpy as np
from scipy.spatial import distance
from sklearn.neighbors import BallTree
from .utils import create_rng, packbits_axis, unpackbits_axis, hamming_cdist
class HammingSearchBase(object):
"""Base class for Hamming neighbors search"""
def fit(self, X):
raise NotImplementedError('HammingSearchBase.fit')
def query(self, X, k, return_dist=False):
raise NotImplementedError('HammingSearchBase.query')
@staticmethod
def _validate_input(X, return_compact=True):
X = np.atleast_2d(np.asarray(X, dtype=np.uint8))
if X.ndim != 2:
raise ValueError("Input hamming array must be two dimensions")
if return_compact:
return packbits_axis(X)
else:
X[X != 0] = 1
return X
class HammingBrute(HammingSearchBase):
def __init__(self, compact=False):
self.compact = compact
def fit(self, X):
"""Fit a set of hamming vectors
Parameters
----------
X : array_like
an array of size (n_features, n_bits). Nonzero entries will be
evaluated as 1, and zero entries as 0
"""
if self.compact:
self._fit_X = self._validate_input(X)
else:
self._fit_X = self._validate_input(X, False)
return self
def query(self, X, k, return_dist=False):
if self.compact:
X = self._validate_input(X)
cdist = hamming_cdist(X, self._fit_X)
else:
X = self._validate_input(X, False)
cdist = distance.cdist(X, self._fit_X, 'hamming')
ind = np.argsort(cdist, 1)[:, :k]
if return_dist:
rows = np.arange(ind.shape[0])[:, np.newaxis]
dist = cdist[rows, ind]
if not self.compact:
dist = (dist * X.shape[1]).astype(int)
return ind, dist
else:
return ind
class HammingBallTree(HammingSearchBase):
def __init__(self, leaf_size=40, query_kwds=None):
self.leaf_size = leaf_size
self.query_kwds = query_kwds or {}
def fit(self, X):
X = self._validate_input(X, return_compact=False)
self._tree = BallTree(X, metric='hamming', leaf_size=self.leaf_size)
return self
def query(self, X, k, return_dist=False):
X = self._validate_input(X, return_compact=False)
if return_dist:
dist, ind = self._tree.query(X, k, return_distance=True)
return ind, (dist * X.shape[1]).astype(int)
else:
return self._tree.query(X, k, return_distance=False)
class HammingANN(HammingSearchBase):
def __init__(self, epsilon=0.5, random_state=None):
self.epsilon = epsilon
self.random_state = random_state
def fit(self, X):
"""Fit a set of hamming vectors
Parameters
----------
X : array_like
an array of size (n_features, n_bits). Nonzero entries will be
evaluated as 1, and zero entries as 0
"""
self._X_fit = self._validate_input(X, False)
self._X_fit_compact = packbits_axis(self._X_fit)
N, n_bits = self._X_fit.shape
# choose number of permutations based on epsilon
M = 2 * int(np.ceil(N ** (1. / (1. + self.epsilon))))
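# e.g. with N = 100000 stored hashes and epsilon = 0.5 this gives
# M = 2 * ceil(100000 ** (1 / 1.5)) = 2 * 2155 = 4310 permutations (illustrative)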
rng = create_rng(self.random_state)
P_indices = np.array([rng.choice(n_bits, n_bits, replace=False)
for i in range(M)])
# P_compact will be of shape (M, X.shape[0]), and contains
# M bit-permutations applied across all the keys
P = self._X_fit[:, P_indices]
P_compact = packbits_axis(P).T
# Do a lexicographic sort of all the permuted bits.
# Here's where cython would help immensely. We could store just
# M permutation-bit arrays, and write a custom sort & binary search
# which will work on these permutations and orderings.
sort_indices = np.argsort(P_compact, 1)
P_compact_sorted = P_compact[np.arange(M)[:, None], sort_indices]
unsort_indices = np.argsort(sort_indices, 1)
#----------------- just a sanity check (TODO: REMOVE THIS)
reordered = P_compact_sorted[np.arange(M)[:, np.newaxis],
unsort_indices]
assert np.all(reordered == P_compact)
#---------------------------------------------------------
self._sort_indices = sort_indices
self._unsort_indices = unsort_indices
self._P_compact_sorted = P_compact_sorted
return self
def query(self, X, k, return_dist=False):
"""Query a set of distances
Parameters
----------
X : array_like
an [n_samples, n_bits] array of hamming features. These will be
interpreted as zeros and ones.
"""
X_compact = self._validate_input(X)
nbrs = np.zeros([X_compact.shape[0], k], dtype=int)
if return_dist:
dist = np.zeros_like(nbrs)
M, N = self._P_compact_sorted.shape
# TODO: MAKE THIS MORE EFFICIENT
for i, val in enumerate(X_compact):
# find ordered index within each random permutation
P_indices = np.array([np.searchsorted(self._P_compact_sorted[j],
val) for j in range(M)])
# get upper/lower indices within each permutation
ind_uplo = np.clip(np.vstack([P_indices, P_indices + 1]), 0, N-1)
# from indices within the sorted permutations, find the
# unique set of indices from the original set of hashes
ind_to_check = np.unique(self._sort_indices[range(M), ind_uplo])
# compute hamming distances for these points, and put into results
distances = hamming_cdist(val, self._X_fit_compact[ind_to_check])
nearest = np.argsort(distances[0])[:k]
nbrs[i, :len(nearest)] = ind_to_check[nearest]
if return_dist:
dist[i, :len(nearest)] = distances[0, nearest[:k]]
if return_dist:
return nbrs, dist
else:
return nbrs
| bsd-3-clause |
niltonlk/nest-simulator | pynest/examples/spatial/test_3d.py | 14 | 2140 | # -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
A spatial network in 3D
-------------------------
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
nest.ResetKernel()
pos = nest.spatial.free(nest.random.uniform(-0.5, 0.5), extent=[1.5, 1.5, 1.5])
l1 = nest.Create('iaf_psc_alpha', 1000, positions=pos)
# visualize
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*nest.GetPosition(l1))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b')
# full connections in box volume [-0.2,0.2]**3
nest.Connect(l1, l1,
{'rule': 'pairwise_bernoulli',
'p': 1.,
'allow_autapses': False,
'mask': {'box': {'lower_left': [-0.2, -0.2, -0.2],
'upper_right': [0.2, 0.2, 0.2]}}})
# show connections from center element
# sender shown in red, targets in green
ctr = nest.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*nest.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = nest.GetPosition(ctr)
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = nest.GetTargetNodes(ctr, l1)[0]
distances = nest.Distance(ctr, l1)
tgt_distances = [d for i, d in enumerate(distances) if i + 1 in tgts]
plt.figure()
plt.hist(tgt_distances, 25)
plt.show()
| gpl-2.0 |
ChengeLi/VehicleTracking | utilities/embedding.py | 1 | 3427 | #### embedding
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, MDS
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.sparse import csr_matrix
class embeddings(obj):
def __init__(self, model,data):
self.modelChoice = model
self.data = data
# self.data = FeatureMtx_norm
def PCA_embedding(self,n_components):
print 'PCA projecting...'
self.pca = PCA(n_components=n_components, whiten=False)
self.embedding_ = self.pca.fit_transform(self.data)
# self.pca = PCAembedding(self.data,50)
# FeatureAfterPCA = self.pca.transform(self.data)
def TSNE_embedding(self,n_components):
# tsne = TSNE(n_components=2, perplexity=30.0)
tsne3 = TSNE(n_components=n_components, perplexity=30.0)
# tsne_data = tsne.fit_transform(FeatureAfterPCA50)
tsne3_data = tsne3.fit_transform(FeatureAfterPCA50)
# pickle.dump(tsne_data,open(DataPathobj.DataPath+'/tsne_data.p','wb'))
# tsne_data = pickle.load(open(DataPathobj.DataPath+'/tsne_data.p','rb'))
self.embedding_ = tsne3_data
def MDS_embedding(self,n_components):
self.mds = MDS(n_components=n_components, max_iter=100, n_init=1)
MDS_data = self.mds.fit_transform(FeatureAfterPCA50)
def LLE_embedding(self):
"""locally linear embedding_"""
# self.lle = sklearn.manifold.LocallyLinearEmbedding(n_neighbors=5, n_components=self.n_dimension, reg=0.001, eigen_solver='auto', tol=1e-06, max_iter=100,
# method='standard', hessian_tol=0.0001, modified_tol=1e-12, neighbors_algorithm='auto', random_state=None)
# self.embedding_ = self.lle.fit_transform(data_sampl_*feature_)
"""use DPGMM or Spectral labels"""
sscfile = loadmat(DataPathobj.sscpath+'001.mat')
labels_DPGMM = csr_matrix(sscfile['labels_DPGMM_upup'], shape=sscfile['labels_DPGMM_upup'].shape).toarray()
labels_spectral = csr_matrix(sscfile['labels_spectral_upup'], shape=sscfile['labels_spectral_upup'].shape).toarray()
trjID = csr_matrix(sscfile['trjID_upup'], shape=sscfile['trjID_upup'].shape).toarray()
"""use connected_components labels"""
adjfile = loadmat(DataPathobj.adjpath+'20knn&thresh_Gaussian_diff_dir_001.mat')
labels_CC = csr_matrix(adjfile['c_upup'], shape=adjfile['c_upup'].shape).toarray()
"""use fake ground truth labels"""
arrange_index = pickle.load(open(DataPathobj.DataPath+'/arrange_index.p','rb'))
# labels_fakeGT = labels_CC[arrange_index]
labels_fakeGT = np.zeros_like(labels_CC)
for ii in range(0,int(labels_fakeGT.shape[1]/20),1):
labels_fakeGT[0,arrange_index[20*ii:min(20*(ii+1),labels_fakeGT.shape[1])]] = ii
# labels_fakeGT[0,5*ii:min(5*(ii+1),labels_fakeGT.shape[1])] = ii
def visEmbedding(self):
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# labels = labels_DPGMM
# labels = labels_spectral
# labels = labels_CC
labels = labels_fakeGT
# data = MDS_data
data = tsne_data
clustered_color = np.array([np.random.randint(0,255) for _ in range(3*int(len(np.unique(labels))))]).reshape(len(np.unique(labels)),3)
plt.figure()
for ii in range(labels.shape[1]):
plt.scatter(data[ii,0],data[ii,1],color=(clustered_color[int(labels[0,ii])].T/255.0))
plt.draw()
| mit |
ZENGXH/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
iABC2XYZ/abc | DM_Twiss/TwissTrain3.py | 2 | 4285 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
Check that the Distribution generation method is right.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from Orth import LambdaR,OrthTrans
from TFOrth import TFLambdaR,TFOrthTrans
plt.close('all')
emitX=4.8
alphaX=-2.3
betaX=15.3
gammaX=(1.+alphaX**2)/betaX
diagRX=LambdaR(emitX,alphaX,betaX,gammaX)
PX=OrthTrans(emitX,alphaX,betaX,gammaX)
numPart=np.int32(1e5)
Z=np.random.randn(2,numPart)
X=np.matmul(np.matmul(PX,np.linalg.inv(diagRX)),Z)
plt.figure(1)
plt.plot(X[0,:],X[1,:],'r.')
plt.axis('equal')
##
def WeightP(shape):
initial=tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def WeightLambda2D():
lambda1=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
lambda2=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
O=tf.reshape(tf.constant(0,tf.float32),[1,1])
LambdaR1=tf.concat([lambda1,O],0)
LambdaR2=tf.concat([O,lambda2],0)
LambdaR=tf.concat([LambdaR1,LambdaR2],1)
return LambdaR
P_1=WeightP([2,2])
LambdaR=WeightLambda2D()
xI=tf.placeholder(tf.float32,[2,None])
xL1=tf.matmul(P_1,xI)
xO=tf.matmul(LambdaR,xL1)
xR=xO[0]**2+xO[1]**2
lossXR=(xR-2.)**2
rateLearn=5e-4
optXR=tf.train.AdamOptimizer(rateLearn)
trainXR=optXR.minimize(lossXR)
meanLossXR=tf.reduce_mean(lossXR)
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess.run(tf.global_variables_initializer())
sizeBatch=64
for _ in xrange(30000):
startBatch=np.random.randint(0,high=numPart-sizeBatch-1)
xFeed=X[:,startBatch:startBatch+sizeBatch:]
sess.run(trainXR,feed_dict={xI:xFeed})
#print(sess.run(LambdaR))
#print('---------------------------')
print(sess.run(meanLossXR,feed_dict={xI:X}))
print('_______________________________________________')
'''
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.clf()
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.pause(0.001)
'''
LambdaRGet=sess.run(LambdaR)
print(LambdaRGet)
print('---------------------------')
print(1./(LambdaRGet[0,0]*LambdaRGet[1,1]))
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
'''
print(sess.run(P_1))
print(sess.run(LambdaR))
print(sess.run(xR,feed_dict={xI:X}))
'''
'''
wEmit=tf.Variable([emitX])
wAlpha=tf.Variable([alphaX])
wBeta=tf.Variable([betaX])
wGamma=tf.Variable([gammaX])
'''
'''
wEmit=tf.Variable([13.])
wAlpha=tf.Variable([1.3])
wBeta=tf.Variable([0.5])
#wGamma=tf.Variable([0.5])
wGamma=(1.+wAlpha**2)/wBeta
xH=tf.placeholder(tf.float32,[2,None])
diagR,diagRT=TFLambdaR(wEmit,wAlpha,wBeta,wGamma)
P,PI=TFOrthTrans(wEmit,wAlpha,wBeta,wGamma)
zH=tf.matmul(tf.matmul(diagR,PI),xH)
R=zH[0]**2+zH[1]**2
#lossR=tf.abs(R-2.e-6)
lossR=R
optR=tf.train.GradientDescentOptimizer(0.01)
trainR=optR.minimize(lossR)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
#sess.run(diagR)
print(sess.run(R,feed_dict={xH:X}))
numIter=10
recEmit=np.zeros(numIter)
recAlpha=np.zeros(numIter)
recBeta=np.zeros(numIter)
recGamma=np.zeros(numIter)
recLoss=np.zeros(numIter)
for _ in xrange(numIter):
sess.run(trainR,feed_dict={xH:X})
recEmit[_]=sess.run(wEmit)
recAlpha[_]=sess.run(wAlpha)
recBeta[_]=sess.run(wBeta)
recGamma[_]=sess.run(wGamma)
recLoss[_]=sess.run(tf.reduce_mean(lossR))
print(recEmit)
print(recAlpha)
#print(sess.run(R,feed_dict={xH:X}))
plt.figure('emit')
plt.plot(recEmit)
plt.figure('alpha')
plt.plot(recAlpha)
plt.figure('beta')
plt.plot(recBeta)
plt.figure('gamma')
plt.plot(recGamma)
plt.figure('Loss')
plt.plot(recLoss)
'''
'''
zGet=sess.run(zH,feed_dict={xH:X})
print(sess.run(lossR,feed_dict={xH:X}))
'''
'''
plt.figure('Check')
plt.hold('on')
plt.plot(Z[0,:],Z[1,:],'bo')
plt.plot(zGet[0,:],zGet[1,:],'r.')
plt.axis('equal')
'''
'''
print(sess.run(wEmit))
print(sess.run(wAlpha))
print(sess.run(wBeta))
print(sess.run(wGamma))
print(sess.run(diagR))
print(sess.run(diagRT))
'''
#print(PX)
#print(sess.run(P))
#print(sess.run(zH,feed_dict={xH:X}))
| gpl-3.0 |
asi-uniovi/malloovia | malloovia/lpsolver.py | 1 | 33503 | # coding: utf-8
# import pandas as pd
"""Malloovia interface to LP solver"""
from typing import Sequence, List, Any
from itertools import product as cartesian_product
from inspect import ismethod
from collections import namedtuple
from uuid import uuid4
import os
import pulp # type: ignore
from pulp import (
LpContinuous,
LpInteger,
LpVariable,
lpSum,
LpProblem,
LpMinimize,
LpMaximize,
PulpSolverError,
COIN_CMD,
log,
subprocess,
)
from .solution_model import (
MallooviaHistogram,
ReservedAllocation,
AllocationInfo,
Status,
pulp_to_malloovia_status,
)
from .model import System, Workload, App, TimeUnit
LpProblem.bestBound = None # Add new attribute to pulp problems
class MallooviaLp:
"""Solves the allocation problem, using Linear Programming.
This class contains methods to create a linear programming problem
(using PuLP), to add restrictions and extra variables to it,
to solve it (using PuLP supported solvers), and to retrieve
the solution in a format amenable to further analysis and display.
The LP problem instantiates these variables:
- For reserved instances: ``Y_(_a,_ic)``, where ``Y`` is a fixed prefix,
``a`` is a string representation of each application and ``ic`` is the string
representation of each reserved instance class considered.
After solving the LP problem, the value of the variable is the number of
reserved machines of instance class `ic` for application `a`, for the whole
reservation period.
- For on-demand instances: ``X_(_a,_ic,_l)``, where ``X`` is a fixed prefix,
``a`` is a string representation of each application, ``ic`` is the string
representation of each on-demand instance class considered and ``l`` is a
string representation of a "workload tuple", which is a tuple of numbers,
e.g: ``(1230, 442, 123)``, each one representing the workload of one of the apps.
After solving the LP problem, the value of the variable is the number of
on-demand machines of instance class `ic` deployed for application `a` at a
timeslot which has a workload prediction equal to the tuple ``l``.
Intended usage:
1. Instantiate the class (see constructor parameters below).
2. Call object's ``.create_problem()``.
3. Call object's ``.solve()``.
4. Retrieve solution by calling object's ``.get_allocation()`` to get the solution
for all variables, or ``.get_reserved_allocation()`` to get ony the number of
reserved instances of each type.
5. Retrieve the cost of the solution via object's ``.get_solution()``.
You can use object's property ``pulp_problem`` to access the PuLP problem object
which represents the linear programming problem, to inspect or save it if required.
"""
def __init__(
self,
system: System,
workloads: Sequence[Workload],
preallocation: ReservedAllocation = None,
relaxed: bool = False,
) -> None:
"""Constructor:
Args:
system: namedtuple containing "name", "apps", "instance_classes"
and "performances" for the problem to solve.
workloads: list of workloads, one per app. Each workload
is a namedtuple which contains a reference to the app, and a sequence
of N numbers which is the prediction for the next N timeslots. This
sequence must have the same length for all workloads in the list.
preallocation: number of reserved instances which are
preallocated. In phase I this parameter can be omitted (defaults to ``None``),
and in phase II it should contain the object returned by
``get_reserved_allocation()`` after solving phase I.
relaxed: if ``True``, the problem uses continuous variables
instead of integer ones.
"""
self.system = system
# Ensure that the workloads received are ordered by the field app in the same
# ordering than the list system.apps
self.workloads = reorder_workloads(workloads, system.apps)
if preallocation is None:
self.fixed_vms = None
else:
assert len(preallocation.instance_classes) == len(
preallocation.vms_number
), (
"preallocation is wrong, the number of elements in instance_classes and in "
"vms_number must be the same"
)
self.fixed_vms = dict(
zip(preallocation.instance_classes, preallocation.vms_number)
)
self.relaxed = relaxed
self.pulp_problem: Any = None
self.load_hist = get_load_hist_from_load(self.workloads)
self.solver_called = False
# CookedData stores some info required when building the problem, so that
# this data is gathered only once, during __init__, and used when required
CookedData = namedtuple( # pylint: disable=invalid-name
"CookedData",
[
"map_dem",
"map_res",
"instances_res",
"instances_dem",
"limiting_sets",
"instance_prices",
"instance_perfs",
],
)
# Separate the instances in two types: reserved and on-demand
# Also create dictionaries for fast lookup of price and performance, converted
# to the timeslot units
instances_res = []
instances_dem = []
instance_prices = {}
instance_perfs = {}
timeslot_length = self.workloads[0].time_unit
for iclass in system.instance_classes:
instance_prices[iclass] = iclass.price / TimeUnit(iclass.time_unit).to(
timeslot_length
)
for app in self.system.apps:
instance_perfs[iclass, app] = self.system.performances.values[
iclass, app
] / TimeUnit(self.system.performances.time_unit).to(timeslot_length)
if iclass.is_reserved:
instances_res.append(iclass)
else:
instances_dem.append(iclass)
# Compute the set of LimitingSets (clouds), extracted
# from the instances
limiting_sets = set()
for iclass in system.instance_classes:
limiting_sets.update(iclass.limiting_sets)
# Store cooked data
self.cooked = CookedData(
map_dem=None, # To be updated later by create_variables
map_res=None,
instances_res=instances_res,
instances_dem=instances_dem,
instance_prices=instance_prices,
instance_perfs=instance_perfs,
limiting_sets=limiting_sets,
)
def _create_variables(self) -> None:
"""Creates the set of variables Y* and X* of the PuLP problem.
Override it if you need to create extra variables (first use
``super().create_variables()`` to call the base class method)."""
if self.relaxed:
kind = LpContinuous
else:
kind = LpInteger
# List all combinations of apps and instances and workloads
comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)
comb_dem = cartesian_product(
self.system.apps, self.cooked.instances_dem, self.load_hist.keys()
)
map_res = LpVariable.dicts("Y", comb_res, 0, None, kind)
map_dem = LpVariable.dicts("X", comb_dem, 0, None, kind)
self.cooked = self.cooked._replace(map_res=map_res, map_dem=map_dem)
def _cost_function(self) -> None:
"""Adds to the LP problem the function to optimize.
The function to optimize is the cost of the deployment. It is computed as
the sum of all Y_a_ic multiplied by the length of the period and by the price/timeslot
of each reserved instance class plus all X_a_ic_l multiplied by the price/timeslot
of each on-demand instance class and by the number of times that workload ``l``
appears in the period."""
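# In symbols (sketch): with T = total number of timeslots in the period and
# H(l) = number of timeslots whose workload tuple is l,
#   cost = sum_{a, ic_res} Y[a, ic_res] * price(ic_res) * T
#        + sum_{a, ic_dem, l} X[a, ic_dem, l] * price(ic_dem) * H(l)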
period_length = sum(self.load_hist.values())
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[_a, _ic]
* self.cooked.instance_prices[_ic]
* period_length
for _a in self.system.apps
for _ic in self.cooked.instances_res
]
+ [
self.cooked.map_dem[_a, _ic, _l]
* self.cooked.instance_prices[_ic]
* self.load_hist[_l]
for _a in self.system.apps
for _ic in self.cooked.instances_dem
for _l in self.load_hist.keys()
]
),
"Objective: minimize cost",
)
def create_problem(self) -> "MallooviaLp":
"""Creates the PuLP problem with all variables and restrictions.
Returns:
pulp.LpProblem: instance of the PuLP problem.
"""
# Create the linear programming problem
self.pulp_problem = LpProblem(self.system.name, LpMinimize)
# Once we have the variables represented as tuples, we use
# the tuples to create the linear programming variables for pulp
self._create_variables()
# Create the goal function
self._cost_function()
# Add all restrictions indicated with functions *_restriction
# in this class
self._add_all_restrictions()
return self
def _add_all_restrictions(self) -> None:
"""This functions uses introspection to discover all implemented
methods whose name ends with ``_restriction``, and runs them all."""
for name in dir(self):
attribute = getattr(self, name)
if ismethod(attribute) and name.endswith("_restriction"):
attribute()
def performance_restriction(self) -> None:
"""Adds performance restriction to the problem.
This restriction forces, for each workload tuple, the performance of the
solution to be greater than or equal to that workload level for
all applications.
"""
for i, app in enumerate(self.system.apps):
perf_reserved = []
for ins in self.cooked.instances_res:
perf_reserved.append(
self.cooked.map_res[app, ins] * self.cooked.instance_perfs[ins, app]
)
for load in self.load_hist.keys():
perf_ondemand = []
for ins in self.cooked.instances_dem:
perf_ondemand.append(
self.cooked.map_dem[app, ins, load]
* self.cooked.instance_perfs[ins, app]
)
self.pulp_problem += (
lpSum(perf_reserved + perf_ondemand) >= load[i],
"Minimum performance for application {} "
"when workload is {}".format(app, load),
)
return
def limit_instances_per_class_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per instance class restriction.
If the ``ic`` instance has a ``max_vms`` attribute, this is a limit for all
``Y_*_ic`` and ``X_*_ic_*`` variables."""
for ins in self.system.instance_classes:
if ins.max_vms == 0:
continue # No limit for this instance class
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
<= ins.max_vms,
"Max instances reserved " "instance class {}".format(ins),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
<= ins.max_vms,
"Max instances for on-demand instance "
"class {} when workload is {}".format(ins, load),
)
def set_fixed_instances_restriction(self) -> None:
"""Adds restrictions for variables with pre-fixed values.
For every ``ic`` in ``self.fixed_vms`` a restriction is
added which forces the total number of those instance classes in
the solution to be equal to a given value for reserved instances,
and at least equal to a given value for on-demand instances.
This is used mainly in phase II to ensure that reserved instances
are fixed, or to keep at least some number of on-demand
instances running from previous timeslots, when using "guided"
strategies."""
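# Sketch of the constraints added for each (instance class ic, value v) pair
# in self.fixed_vms:
#   reserved ic:   sum_a Y[a, ic] == v
#   on-demand ic:  sum_a X[a, ic, l] >= v   for every workload tuple l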
if self.fixed_vms is None: # No fixed instances, we are in PhaseI
return
for ins, value in self.fixed_vms.items():
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
== value,
"Reserved instance class {} " "is fixed to {}".format(ins, value),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
>= value,
"On-demand instance class {} is at least {} "
"when workload is {}".format(ins, value, load),
)
def limit_instances_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per limiting set restriction.
If the limiting set provides a max_vms > 0, then the sum of all
instances which are member of that limiting set should be limited
to that maximum."""
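# Sketch: for every limiting set (cloud) c with max_vms > 0 and every workload
# tuple l, summing over all apps a and instance classes ic with c in
# ic.limiting_sets:
#   sum Y[a, ic] + sum X[a, ic, l]  <=  c.max_vms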
for cloud in self.cooked.limiting_sets:
if cloud.max_vms == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic]
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load]
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_vms,
"Max instances for limiting set {} "
"when workload is {}".format(cloud, load),
)
def limit_cores_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_cores`` per limiting set restriction.
If the limiting set provides a max_cores > 0, then the sum of all
instance cores among all instance classes which are member of that
limiting set should be limited to that maximum."""
for cloud in self.cooked.limiting_sets:
if cloud.max_cores == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_cores,
"Max cores for limiting set {} "
"when workload is {}".format(cloud, load),
)
def solve(self, *args, **kwargs):
"""Calls PuLP solver.
Args:
*args: positional args passed to ``LpProblem.solve()``
\\**kwargs: keyword args passed to ``LpProblem.solve()``.
Returns:
the value returned by ``LpProblem.solve()``.
"""
self.solver_called = True
return self.pulp_problem.solve(*args, **kwargs)
def get_status(self) -> Status:
"""Returns the status of the problem"""
if not self.solver_called:
return Status.unsolved
return pulp_to_malloovia_status(self.pulp_problem.status)
def get_cost(self) -> float:
"""Gets the cost of the problem, obtained after solving it.
Returns:
The cost of the optimal solution found by PuLP.
Raises:
ValueError: when the problem is yet unsolved.
"""
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
return pulp.value(self.pulp_problem.objective)
def get_allocation(self) -> AllocationInfo:
"""Retrieves the allocation given by the solution of the LP problem.
Returns:
The allocation given by the solution.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
workload_tuples = []
repeats = []
allocation = []
for load, repeat in self.load_hist.items():
workload_tuples.append(load)
repeats.append(repeat)
workload_allocation = []
for app in self.system.apps:
row = list(
self.cooked.map_res[app, i].varValue
for i in self.cooked.instances_res
)
row.extend(
self.cooked.map_dem[app, i, load].varValue
for i in self.cooked.instances_dem
)
workload_allocation.append(tuple(row))
allocation.append(tuple(workload_allocation))
return AllocationInfo(
apps=tuple(self.system.apps),
instance_classes=tuple(
self.cooked.instances_res + self.cooked.instances_dem
),
workload_tuples=workload_tuples,
repeats=repeats,
values=tuple(allocation),
units="vms",
)
def get_reserved_allocation(self) -> ReservedAllocation:
"""Retrieves the allocation of reserved instances from the solution of the LP problem.
Returns:
The total number of reserved instance classes of each
type to be purchased for the whole reservation period.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
# Returns the solution as a list of numbers, each one
# representing the required number of vms of each reserved type, stored
# in the field "vms_number" of the object.
# This number is valid for any workload tuple, and for every timeslot
# in the reservation period. Also, it does not depend on the applications
# because it is the total number of reserved instances for all apps.
# The returned class also stores the list "instance_classes" which provides
# the instance class associated with each index in the above table.
# So, if r is the value returned, the value of r.vms_number[i]
# (being i an integer) is the number of VMs to be allocated
# from reserved instance class r.instance_classes[i], for every
# timeslot and for the set of all apps.
# This is all the information required for PhaseII.
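# Illustrative example (hypothetical names): if r.instance_classes == (m4_large_res,)
# and r.vms_number == (3.0,), then three reserved VMs of that instance class are
# kept active for every timeslot of the reservation period, shared by all apps.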
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
allocation: List[float] = []
for _ in self.load_hist: # Loop over all possible workloads
workload_allocation: List[float] = []
for iclass in self.cooked.instances_res:
i_allocation = sum(
self.cooked.map_res[app, iclass].varValue
for app in self.system.apps
)
workload_allocation.append(i_allocation)
# The obtained allocation MUST be the same for any workload
assert allocation == [] or allocation == workload_allocation
allocation = workload_allocation
return ReservedAllocation(
instance_classes=tuple(self.cooked.instances_res),
vms_number=tuple(allocation),
)
class ShortReprTuple(tuple):
"""This class implements a tuple whose repr is not standard
but uses instead the hash of the tuple, to ensure a constant
length of the repr.
This is required to store keys in the histogram, because they
are used to name LP variables which otherwise would have
a name too long for the solver if the number of apps is large.
"""
def __repr__(self):
return str(hash(self))
def get_load_hist_from_load(workloads: Sequence[Workload]) -> MallooviaHistogram:
"""Computes the histogram of the workloads.
Args:
workloads: a sequence of :class:`Workload` objects, each one
containing the fields ``app`` (which identifies the app producing this
workload) and ``values`` (which stores a sequence of numbers representing
the workload for each timeslot for that app).
Returns:
A dictionary where the key is the workload for one timeslot,
expressed as a tuple with one element for each application, and the value
is the number of timeslots in which that workload was found.
"""
hist = MallooviaHistogram()
hist.apps = tuple(w.app for w in workloads)
timeslots = len(workloads[0].values)
# Ensure that all workloads have the same length and units
assert all(
len(w.values) == timeslots for w in workloads
), "All workloads should have the same length"
# Iterate over tuples of loads, one tuple per timeslot
workload_tuples = zip(*(w.values for w in workloads))
for load in workload_tuples:
hist[ShortReprTuple(load)] += 1
return hist
def reorder_workloads(
workloads: Sequence[Workload], apps: Sequence[App]
) -> Sequence[Workload]:
"""Returns the a new workload list ordered as the list of apps.
Args:
workloads: Sequence of workloads to reorder
apps: Sequence of apps which dictate the new ordering
Returns:
A new sequence of workloads, ordered by app in the order given by apps argument.
"""
map_apps_workloads = {workload.app: workload for workload in workloads}
ordered_workloads = []
for app in apps:
ordered_workloads.append(map_apps_workloads[app])
return tuple(ordered_workloads)
class MallooviaLpMaximizeTimeslotPerformance(MallooviaLp):
"""Find the allocation which maximizes performance for a single timeslot.
This problem is the dual of MallooviaLp. Instead of minimizing the cost
while providing the minimum performances, the problem to solve now is
to maximize the performance without breaking the limits.
The class inherits from MallooviaLp the initialization methods as well as
the ones to get the cost and allocation of the solution, but overrides
the function to be optimized and some of the constraints.
"""
def _cost_function(self) -> None:
"""Adds to the LP problem the function to optimize (maximize in this case).
The function to optimize is the performance of the deployment. However, since
the system is composed to several applications, no single "performance" exists.
The solution is to maximize the "fraction of performance fulfilled", i.e., the
sum of `X(_a,_ic,_l)*_ic.performance/_l[a]` among all `_a` and `_ic`.
"""
workloads = {wl.app: wl.values[0] for wl in self.workloads}
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[_a, _ic]
* self.cooked.instance_perfs[_ic, _a]
/ workloads[_a]
for _a in self.system.apps
for _ic in self.cooked.instances_res
]
+ [
self.cooked.map_dem[_a, _ic, _l]
* self.cooked.instance_perfs[_ic, _a]
/ workloads[_a]
for _a in self.system.apps
for _ic in self.cooked.instances_dem
for _l in self.load_hist.keys()
]
),
"Objective: maximize fulfilled workload fraction",
)
def create_problem(self) -> "MallooviaLpMaximizeTimeslotPerformance":
"""This method creates the PuLP problem, and calls other
methods to add variables and restrictions to it.
It initializes the attribute 'self.prob' with the
instance of the PuLP problem created.
"""
# Create the linear programming problem
self.pulp_problem = LpProblem(self.system.name, LpMaximize)
# Create the linear programming variables for pulp
self._create_variables()
# Create the goal function
self._cost_function()
# Add all restrictions indicated with functions *_restriction
# in this class
self._add_all_restrictions()
return self
def performance_restriction(self) -> None:
"""Adds performance restriction to the problem.
This restriction forces, for each workload tuple, the performance of the
solution to be less than or equal to that workload level, for
all applications.
"""
for i, app in enumerate(self.system.apps):
perf_reserved = []
for ins in self.cooked.instances_res:
perf_reserved.append(
self.cooked.map_res[app, ins] * self.cooked.instance_perfs[ins, app]
)
for load in self.load_hist.keys():
perf_ondemand = []
for ins in self.cooked.instances_dem:
perf_ondemand.append(
self.cooked.map_dem[app, ins, load]
* self.cooked.instance_perfs[ins, app]
)
self.pulp_problem += (
lpSum(perf_reserved + perf_ondemand) <= load[i],
"Maximum performance for application {} "
"when workload is {}".format(app, load),
)
def get_cost(self) -> float:
"""Gets the cost of the problem, obtained after solving it.
Returns:
The cost of the optimal solution found by PuLP.
Raises:
ValueError: when the problem is yet unsolved.
"""
if self.pulp_problem.status == pulp.LpStatusNotSolved: # Not solved
raise ValueError("Cannot get the cost of an unsolved problem")
return sum(
self.cooked.instance_prices[ic] * self.cooked.map_res[app, ic].varValue
for ic in self.cooked.instances_res
for app in self.system.apps
) + sum(
self.cooked.instance_prices[ic]
* self.cooked.map_dem[app, ic, wl].varValue
* self.load_hist[wl]
for ic in self.cooked.instances_dem
for app in self.system.apps
for wl in self.load_hist.keys()
)
# The following function is used to monkey patch part of PuLP code.
# This modification is aimed to get the value of the optimal best bound
# which is provided by CBC solver as part of the solution, even if
# the solution could not be found due to a time limit
#
# PuLP does not recover this value, but for our analysis is useful
# to estimate the worst-case error of our approximation when the
# exact solution cannot be found in a reasonable time.
#
# The code patches the part in which PuLP calls CBC, so that the standard
# output of CBC is redirected to a logfile. When CBC exits, the code
# inspects the logfile and locates the bestBound value, storing it
# as part of the problem to make it accessible to the python code.
#
# This patch only works when the solver is COIN.
# pylint: disable=invalid-name,too-many-locals,missing-docstring,bare-except,too-many-branches,too-many-statements
def _solve_CBC_patched(self, lp, use_mps=True): # pragma: no cover
"""Solve a MIP problem using CBC, patched from original PuLP function
to save a log with cbc's output and take from it the best bound."""
def takeBestBoundFromLog(filename):
try:
with open(filename, "r") as f:
for l in f:
if l.startswith("Lower bound:"):
return float(l.split(":")[-1])
except:
pass
return None
if not self.executable(self.path):
raise PulpSolverError("Pulp: cannot execute %s cwd: %s" %
(self.path, os.getcwd()))
if not self.keepFiles:
uuid = uuid4().hex
tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid)
tmpMps = os.path.join(self.tmpDir, "%s-pulp.mps" % uuid)
tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid)
tmpSol_init = os.path.join(self.tmpDir, "%s-pulp_init.sol" % uuid)
else:
tmpLp = lp.name+"-pulp.lp"
tmpMps = lp.name+"-pulp.mps"
tmpSol = lp.name+"-pulp.sol"
tmpSol_init = lp.name + "-pulp_init.sol"
if use_mps:
vs, variablesNames, constraintsNames, objectiveName = lp.writeMPS(tmpMps, rename = 1)
cmds = ' '+tmpMps+" "
if lp.sense == LpMaximize:
cmds += 'max '
else:
vs = lp.writeLP(tmpLp)
# In the Lp we do not create new variable or constraint names:
variablesNames = dict((v.name, v.name) for v in vs)
constraintsNames = dict((c, c) for c in lp.constraints)
objectiveName = None
cmds = ' '+tmpLp+" "
if self.mip_start:
self.writesol(tmpSol_init, lp, vs, variablesNames, constraintsNames)
cmds += 'mips {} '.format(tmpSol_init)
if self.threads:
cmds += "threads %s "%self.threads
if self.fracGap is not None:
cmds += "ratio %s "%self.fracGap
if self.maxSeconds is not None:
cmds += "sec %s "%self.maxSeconds
if self.presolve:
cmds += "presolve on "
if self.strong:
cmds += "strong %d " % self.strong
if self.cuts:
cmds += "gomory on "
# cbc.write("oddhole on "
cmds += "knapsack on "
cmds += "probing on "
for option in self.options:
cmds += option+" "
if self.mip:
cmds += "branch "
else:
cmds += "initialSolve "
cmds += "printingOptions all "
cmds += "solution "+tmpSol+" "
# if self.msg:
# pipe = None
# else:
# pipe = open(os.devnull, 'w')
log.debug(self.path + cmds)
with open(tmpLp + ".log", 'w') as pipe:
cbc = subprocess.Popen((self.path + cmds).split(), stdout=pipe,
stderr=pipe)
if cbc.wait() != 0:
raise PulpSolverError("Pulp: Error while trying to execute " +
self.path)
if not os.path.exists(tmpSol):
raise PulpSolverError("Pulp: Error while executing "+self.path)
if use_mps:
status, values, reducedCosts, shadowPrices, slacks, sol_status = \
self.readsol_MPS(tmpSol, lp, lp.variables(), variablesNames, constraintsNames)
else:
status, values, reducedCosts, shadowPrices, slacks, sol_status = self.readsol_LP(
tmpSol, lp, lp.variables()
)
lp.assignVarsVals(values)
lp.assignVarsDj(reducedCosts)
lp.assignConsPi(shadowPrices)
lp.assignConsSlack(slacks, activity=True)
lp.assignStatus(status, sol_status)
lp.bestBound = takeBestBoundFromLog(tmpLp + ".log")
if not self.keepFiles:
for f in [tmpMps, tmpLp, tmpSol, tmpSol_init]:
try:
os.remove(f)
except:
pass
return status
# Monkey patching
COIN_CMD.solve_CBC = _solve_CBC_patched
__all__ = [
"MallooviaLp",
"get_load_hist_from_load",
"MallooviaLpMaximizeTimeslotPerformance",
]
| mit |
ClimbsRocks/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tseries/tests/test_frequencies.py | 9 | 25284 | from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
        # All of these dates are on the same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
    def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
def test_legacy_offset_warnings(self):
for k, v in compat.iteritems(frequencies._rule_aliases):
with tm.assert_produces_warning(FutureWarning):
result = frequencies.get_offset(k)
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))
assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro()))
assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli()))
assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano()))
assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro()))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_stats_cluster_methods.py | 6 | 8607 | # doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
For more information, see:
Ridgway et al. 2012, "The problem of low variance voxels in
statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
In the top row plot the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth, the
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions:
X = X.reshape((n_subjects, 1, n_src))
###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
# Not specifying a connectivity matrix implies grid-like connectivity,
# which we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
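# A rough sketch of what the "hat" regularization amounts to (illustrative
# only; the actual statistic below comes from mne's ttest_1samp_no_p, whose
# implementation may differ in detail): a fraction ``sigma`` of the largest
# sample variance is added to every pixel's variance before forming the T
# statistic, so low-variance pixels cannot produce spuriously large T values.
def _hat_t_sketch(data, sigma_reg):
    """Hypothetical helper illustrating a variance-regularized one-sample T."""
    var = np.var(data, axis=0, ddof=1)
    var = var + sigma_reg * var.max()  # the "hat": borrow from the largest variance
    return np.mean(data, axis=0) / np.sqrt(var / data.shape[0])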
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
| bsd-3-clause |
fw1121/Roary | contrib/roary_plots/roary_plots.py | 1 | 5754 | #!/usr/bin/env python
# Copyright (C) <2015> EMBL-European Bioinformatics Institute
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Neither the institution name nor the name roary_plots
# can be used to endorse or promote products derived from
# this software without prior written permission.
# For written permission, please contact <marco@ebi.ac.uk>.
# Products derived from this software may not be called roary_plots
# nor may roary_plots appear in their names without prior written
# permission of the developers. You should have received a copy
# of the GNU General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
__author__ = "Marco Galardini"
__version__ = '0.1.0'
def get_options():
import argparse
# create the top-level parser
description = "Create plots from roary outputs"
parser = argparse.ArgumentParser(description = description,
prog = 'roary_plots.py')
parser.add_argument('tree', action='store',
help='Newick Tree file', default='accessory_binary_genes.fa.newick')
parser.add_argument('spreadsheet', action='store',
help='Roary gene presence/absence spreadsheet', default='gene_presence_absence.csv')
parser.add_argument('--version', action='version',
version='%(prog)s '+__version__)
return parser.parse_args()
if __name__ == "__main__":
options = get_options()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
import os
import pandas as pd
import numpy as np
from Bio import Phylo
t = Phylo.read(options.tree, 'newick')
# Max distance to create better plots
mdist = max([t.distance(t.root, x) for x in t.get_terminals()])
# Load roary
roary = pd.read_table(options.spreadsheet,
sep=',',
low_memory=False)
# Set index (group name)
roary.set_index('Gene', inplace=True)
# Drop the other info columns
roary.drop(list(roary.columns[:10]), axis=1, inplace=True)
# Transform it in a presence/absence matrix (1/0)
roary.replace('.{2,100}', 1, regex=True, inplace=True)
roary.replace(np.nan, 0, regex=True, inplace=True)
# Sort the matrix by the sum of strains presence
idx = roary.sum(axis=1).order(ascending=False).index
roary_sorted = roary.ix[idx]
# Pangenome frequency plot
plt.figure(figsize=(7, 5))
plt.hist(roary.sum(axis=1), roary.shape[1],
histtype="stepfilled", alpha=.7)
plt.xlabel('Number of genomes')
plt.ylabel('Number of genes')
sns.despine(left=True,
bottom=True)
plt.savefig('pangenome_frequency.png')
plt.clf()
# Sort the matrix according to tip labels in the tree
roary_sorted = roary_sorted[[x.name for x in t.get_terminals()]]
# Plot presence/absence matrix against the tree
with sns.axes_style('whitegrid'):
fig = plt.figure(figsize=(17, 10))
ax1=plt.subplot2grid((1,40), (0, 10), colspan=30)
a=ax1.matshow(roary_sorted.T, cmap=plt.cm.Blues,
vmin=0, vmax=1,
aspect='auto',
interpolation='none',
)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.axis('off')
ax = fig.add_subplot(1,2,1)
ax=plt.subplot2grid((1,40), (0, 0), colspan=10, axisbg='white')
fig.subplots_adjust(wspace=0, hspace=0)
ax1.set_title('Roary matrix\n(%d gene clusters)'%roary.shape[0])
Phylo.draw(t, axes=ax,
show_confidence=False,
label_func=lambda x: None,
xticks=([],), yticks=([],),
ylabel=('',), xlabel=('',),
xlim=(-0.01,mdist+0.01),
axis=('off',),
title=('parSNP tree\n(%d strains)'%roary.shape[1],),
do_show=False,
)
plt.savefig('pangenome_matrix.png')
plt.clf()
# Plot the pangenome pie chart
plt.figure(figsize=(10, 10))
core = roary[(roary.sum(axis=1) >= roary.shape[1]*0.99) & (roary.sum(axis=1) <= roary.shape[1] )].shape[0]
softcore = roary[(roary.sum(axis=1) >= roary.shape[1]*0.95) & (roary.sum(axis=1) < roary.shape[1]*0.99)].shape[0]
shell = roary[(roary.sum(axis=1) >= roary.shape[1]*0.15) & (roary.sum(axis=1) < roary.shape[1]*0.95)].shape[0]
cloud = roary[roary.sum(axis=1) < roary.shape[1]*0.15].shape[0]
total = roary.shape[0]
def my_autopct(pct):
val=int(round(pct*total/100.0))
return '{v:d}'.format(v=val)
a=plt.pie([core, softcore, shell, cloud],
labels=['core\n(%d <= strains <= %d)'%(roary.shape[1]*.99,roary.shape[1]),
'soft-core\n(%d <= strains < %d)'%(roary.shape[1]*.95,roary.shape[1]*.99),
'shell\n(%d <= strains < %d)'%(roary.shape[1]*.15,roary.shape[1]*.95),
'cloud\n(strains < %d)'%(roary.shape[1]*.15)],
explode=[0.1, 0.05, 0.02, 0], radius=0.9,
colors=[(0, 0, 1, float(x)/total) for x in (core, softcore, shell, cloud)],
autopct=my_autopct)
plt.savefig('pangenome_pie.png')
plt.clf()
| gpl-3.0 |
OshynSong/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
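# One possible completion of the tasks above (a minimal sketch; the estimator
# settings are illustrative -- the defaults are kept everywhere else):
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)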
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 83 | 34544 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
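    Examples
    --------
    For illustration (a hypothetical three-point, one-feature input), the
    pairs are enumerated in the order (0, 1), (0, 2), (1, 2)::
        D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
        # D  -> [[1.], [3.], [2.]]   (|X[i] - X[j]| for each pair)
        # ij -> [[0, 1], [0, 2], [1, 2]]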
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
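    A fitted model can then return both the prediction and its mean squared
    error; a minimal illustration (output not shown)::
        y_pred, sigma2_pred = gp.predict(np.array([[2.], [4.]]), eval_MSE=True)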
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
                # Compare the new optimum to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
nrhine1/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
hantek/BinaryConnect | mnist.py | 1 | 6258 | # Copyright 2015 Matthieu Courbariaux, Zhouhan Lin
"""
This file is adapted from BinaryConnect:
https://github.com/MatthieuCourbariaux/BinaryConnect
Running this script should reproduce the results of a feed forward net trained
on MNIST.
To train a vanilla feed forward net with ordinary backprop:
1. type "git checkout fullresolution" to switch to the "fullresolution" branch
2. execute "python mnist.py"
To train a feed forward net with Binary Connect + quantized backprop:
1. type "git checkout binary" to switch to the "binary" branch
2. execute "python mnist.py"
To train a feed forward net with Ternary Connect + quantized backprop:
1. type "git checkout ternary" to switch to the "ternary" branch
2. execute "python mnist.py"
"""
import gzip
import cPickle
import numpy as np
import os
import os.path
import sys
import time
from trainer import Trainer
from model import Network
from layer import linear_layer, ReLU_layer
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
if __name__ == "__main__":
rng = np.random.RandomState(1234)
train_set_size = 50000
# data augmentation
zero_pad = 0
affine_transform_a = 0
affine_transform_b = 0
horizontal_flip = False
# batch
# keep a multiple a factor of 10000 if possible
# 10000 = (2*5)^4
batch_size = 200
number_of_batches_on_gpu = train_set_size/batch_size
BN = True
BN_epsilon=1e-4 # for numerical stability
BN_fast_eval= True
dropout_input = 1.
dropout_hidden = 1.
shuffle_examples = True
shuffle_batches = False
# Termination criteria
n_epoch = 1000
monitor_step = 2
# LR
LR = .3
LR_fin = .01
LR_decay = (LR_fin/LR)**(1./n_epoch)
    M = 0.
# architecture
n_inputs = 784
n_units = 1024
n_classes = 10
n_hidden_layer = 3
# BinaryConnect
BinaryConnect = True
stochastic = True
# Old hyperparameters
binary_training=False
stochastic_training=False
binary_test=False
stochastic_test=False
if BinaryConnect == True:
binary_training=True
if stochastic == True:
stochastic_training=True
else:
binary_test=True
print 'Loading the dataset'
train_set = MNIST(which_set= 'train', start=0, stop = train_set_size, center = True)
valid_set = MNIST(which_set= 'train', start=50000, stop = 60000, center = True)
test_set = MNIST(which_set= 'test', center = True)
# bc01 format
train_set.X = train_set.X.reshape(train_set_size,1,28,28)
valid_set.X = valid_set.X.reshape(10000,1,28,28)
test_set.X = test_set.X.reshape(10000,1,28,28)
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.float32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2* train_set.y - 1.
valid_set.y = 2* valid_set.y - 1.
test_set.y = 2* test_set.y - 1.
print 'Creating the model'
class PI_MNIST_model(Network):
def __init__(self, rng):
Network.__init__(self, n_hidden_layer = n_hidden_layer, BN = BN)
print " Fully connected layer 1:"
self.layer.append(ReLU_layer(rng = rng, n_inputs = n_inputs, n_units = n_units,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_input,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
for k in range(n_hidden_layer-1):
print " Fully connected layer "+ str(k) +":"
self.layer.append(ReLU_layer(rng = rng, n_inputs = n_units, n_units = n_units,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_hidden,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
print " L2 SVM layer:"
self.layer.append(linear_layer(rng = rng, n_inputs = n_units, n_units = n_classes,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_hidden,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
model = PI_MNIST_model(rng = rng)
print 'Creating the trainer'
trainer = Trainer(rng = rng,
train_set = train_set, valid_set = valid_set, test_set = test_set,
model = model, load_path = None, save_path = None,
zero_pad=zero_pad,
affine_transform_a=affine_transform_a, # a is (more or less) the rotations
affine_transform_b=affine_transform_b, # b is the translations
horizontal_flip=horizontal_flip,
LR = LR, LR_decay = LR_decay, LR_fin = LR_fin,
M = M,
BN = BN, BN_fast_eval=BN_fast_eval,
batch_size = batch_size, number_of_batches_on_gpu = number_of_batches_on_gpu,
n_epoch = n_epoch, monitor_step = monitor_step,
shuffle_batches = shuffle_batches, shuffle_examples = shuffle_examples)
print 'Building'
trainer.build()
print 'Training'
start_time = time.clock()
trainer.train()
end_time = time.clock()
print 'The training took %i seconds'%(end_time - start_time)
print 'Display weights'
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from filter_plot import tile_raster_images
W = np.transpose(model.layer[0].W.get_value())
W = tile_raster_images(W,(28,28),(4,4),(2, 2))
plt.imshow(W, cmap = cm.Greys_r)
    # 'core_path' is not defined anywhere in this script; save to a literal filename instead
    plt.savefig('mnist_features.png')
| gpl-2.0 |
marscher/mdtraj | MDTraj/core/trajectory.py | 1 | 51903 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A. Beauchamp, TJ Lane, Joshua Adelman, Lee-Ping Wang
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import warnings
import functools
from copy import deepcopy
import numpy as np
from mdtraj.formats import DCDTrajectoryFile
from mdtraj.formats import BINPOSTrajectoryFile
from mdtraj.formats import XTCTrajectoryFile
from mdtraj.formats import TRRTrajectoryFile
from mdtraj.formats import HDF5TrajectoryFile
from mdtraj.formats import NetCDFTrajectoryFile
from mdtraj.formats import LH5TrajectoryFile
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.formats import MDCRDTrajectoryFile
from mdtraj.formats import ArcTrajectoryFile
from mdtraj.formats.prmtop import load_prmtop
from mdtraj.core.topology import Topology
from mdtraj.utils import (ensure_type, in_units_of, lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles, cast_indices)
from mdtraj.utils.six.moves import xrange
from mdtraj.utils.six import PY3, string_types
from mdtraj import _rmsd
from mdtraj import _FormatRegistry
##############################################################################
# Globals
##############################################################################
__all__ = ['open', 'load', 'iterload', 'load_frame', 'Trajectory']
##############################################################################
# Utilities
##############################################################################
def _assert_files_exist(filenames):
"""Throw an IO error if files don't exist
Parameters
----------
filenames : {str, [str]}
String or list of strings to check
"""
if isinstance(filenames, string_types):
filenames = [filenames]
for fn in filenames:
if not (os.path.exists(fn) and os.path.isfile(fn)):
raise IOError('No such file: %s' % fn)
def _parse_topology(top):
"""Get the topology from a argument of indeterminate type
If top is a string, we try loading a pdb, if its a trajectory
we extract its topology.
Returns
-------
topology : md.Topology
"""
try:
ext = os.path.splitext(top)[1]
except:
ext = None # might not be a string
if isinstance(top, string_types) and (ext in ['.pdb', '.h5','.lh5']):
_traj = load_frame(top, 0)
topology = _traj.topology
elif isinstance(top, string_types) and (ext == '.prmtop'):
topology = load_prmtop(top)
elif isinstance(top, Trajectory):
topology = top.topology
elif isinstance(top, Topology):
topology = top
else:
raise TypeError('A topology is required. You supplied top=%s' % str(top))
return topology
##############################################################################
# Utilities
##############################################################################
def open(filename, mode='r', force_overwrite=True, **kwargs):
"""Open a trajectory file-like object
    This factory function returns an instance of an open file-like
object capable of reading/writing the trajectory (depending on
'mode'). It does not actually load the trajectory from disk or
write anything.
Parameters
----------
filename : str
Path to the trajectory file on disk
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for
write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Other Parameters
----------------
kwargs : dict
Other keyword parameters are passed directly to the file object
Returns
-------
fileobject : object
Open trajectory file, whose type is determined by the filename
extension
See Also
--------
load, ArcTrajectoryFile, BINPOSTrajectoryFile, DCDTrajectoryFile,
HDF5TrajectoryFile, LH5TrajectoryFile, MDCRDTrajectoryFile,
NetCDFTrajectoryFile, PDBTrajectoryFile, TRRTrajectoryFile,
XTCTrajectoryFile
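    Examples
    --------
    Illustrative only -- the path is hypothetical, and the reader class
    returned depends on the file extension:
    >>> import mdtraj as md
    >>> with md.open('trajectory.xtc') as f:
    ...     xyz, time, step, box = f.read(100)   # read the first 100 frames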
"""
extension = os.path.splitext(filename)[1]
try:
loader = _FormatRegistry.fileobjects[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files with extensions in %s'
% (filename, extension, _FormatRegistry.fileobjects.keys()))
return loader(filename, mode=mode, force_overwrite=force_overwrite, **kwargs)
def load_frame(filename, index, top=None, atom_indices=None):
"""Load a single frame from a trajectory file
Parameters
----------
filename : str
Path to the trajectory file on disk
index : int
Load the `index`-th frame from the specified file
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. These indices are zero-based (not 1 based, as used by the PDB
format).
Examples
--------
>>> import mdtraj as md
>>> first_frame = md.load_frame('traj.h5', 0)
>>> print first_frame
<mdtraj.Trajectory with 1 frames, 22 atoms>
See Also
--------
load, load_frame
Returns
-------
trajectory : md.Trajectory
The resulting conformation, as an md.Trajectory object containing
a single frame.
"""
_assert_files_exist(filename)
extension = os.path.splitext(filename)[1]
try:
loader = _FormatRegistry.loaders[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files with extensions in %s'
% (filename, extension, _FormatRegistry.loaders.keys()))
kwargs = {'atom_indices': atom_indices}
if loader.__name__ not in ['load_hdf5', 'load_pdb']:
kwargs['top'] = top
return loader(filename, frame=index, **kwargs)
def load(filename_or_filenames, discard_overlapping_frames=False, **kwargs):
"""Load a trajectory from one or more files on disk.
This function dispatches to one of the specialized trajectory loaders based
on the extension on the filename. Because different trajectory formats save
different information on disk, the specific keyword argument options supported
    depend on the specific loader.
Parameters
----------
filename_or_filenames : {str, list of strings}
Filename or list of filenames containing trajectory files of a single format.
discard_overlapping_frames : bool, default=False
Look for overlapping frames between the last frame of one filename and
the first frame of a subsequent filename and discard them
Other Parameters
----------------
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information. This option is not required for the .h5, .lh5,
and .pdb formats, which already contain topology information.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
See Also
--------
load_frame, iterload
Examples
--------
>>> import mdtraj as md
>>> traj = md.load('output.xtc', top='topology.pdb')
>>> print traj
<mdtraj.Trajectory with 500 frames, 423 atoms at 0x110740a90>
>>> traj2 = md.load('output.xtc', stride=2, top='topology.pdb')
>>> print traj2
<mdtraj.Trajectory with 250 frames, 423 atoms at 0x11136e410>
    >>> traj3 = md.load('output.xtc', atom_indices=[0, 1], top='topology.pdb')
>>> print traj3
<mdtraj.Trajectory with 500 frames, 2 atoms at 0x18236e4a0>
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
"""
_assert_files_exist(filename_or_filenames)
if "top" in kwargs: # If applicable, pre-loads the topology from PDB for major performance boost.
kwargs["top"] = _parse_topology(kwargs["top"])
# grab the extension of the filename
if isinstance(filename_or_filenames, string_types): # If a single filename
extension = os.path.splitext(filename_or_filenames)[1]
filename = filename_or_filenames
else: # If multiple filenames, take the first one.
extensions = [os.path.splitext(filename_i)[1] for filename_i in filename_or_filenames]
if len(set(extensions)) != 1:
raise(TypeError("All filenames must have same extension!"))
else:
t = [load(f, **kwargs) for f in filename_or_filenames]
# we know the topology is equal because we sent the same topology
# kwarg in, so there's no reason to spend extra time checking
return t[0].join(t[1:], discard_overlapping_frames=discard_overlapping_frames,
check_topology=False)
try:
#loader = _LoaderRegistry[extension][0]
loader = _FormatRegistry.loaders[extension]
except KeyError:
raise IOError('Sorry, no loader for filename=%s (extension=%s) '
'was found. I can only load files '
'with extensions in %s' % (filename, extension, _FormatRegistry.loaders.keys()))
if loader.__name__ in ['load_hdf5', 'load_pdb', 'load_lh5']:
if 'top' in kwargs:
warnings.warn('top= kwarg ignored since file contains topology information')
# this is a little hack that makes calling load() more predicable. since
# most of the loaders take a kwargs "top" except for load_hdf5, (since
# it saves the topology inside the file), we often end up calling
# load_hdf5 via this function with the top kwarg specified. but then
# there would be a signature binding error. it's easier just to ignore
# it.
kwargs.pop('top', None)
value = loader(filename, **kwargs)
return value
def iterload(filename, chunk=100, **kwargs):
"""An iterator over a trajectory from one or more files on disk, in fragments
This may be more memory efficient than loading an entire trajectory at
once
Parameters
----------
filename : str
Path to the trajectory file on disk
chunk : int
Number of frames to load at once from disk per iteration.
Other Parameters
----------------
top : {str, Trajectory, Topology}
Most trajectory formats do not contain topology information. Pass in
either the path to a RCSB PDB file, a trajectory, or a topology to
supply this information. This option is not required for the .h5, .lh5,
and .pdb formats, which already contain topology information.
stride : int, default=None
Only read every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
See Also
--------
load, load_frame
Examples
--------
>>> import mdtraj as md
>>> for chunk in md.iterload('output.xtc', top='topology.pdb')
... print chunk
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
<mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
"""
stride = kwargs.get('stride', 1)
atom_indices = cast_indices(kwargs.get('atom_indices', None))
if chunk % stride != 0:
raise ValueError('Stride must be a divisor of chunk. stride=%d does not go '
'evenly into chunk=%d' % (stride, chunk))
if filename.endswith('.h5'):
if 'top' in kwargs:
warnings.warn('top= kwarg ignored since file contains topology information')
with HDF5TrajectoryFile(filename) as f:
if atom_indices is None:
topology = f.topology
else:
topology = f.topology.subset(atom_indices)
while True:
data = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)
if data == []:
raise StopIteration()
in_units_of(data.coordinates, f.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(data.cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
yield Trajectory(xyz=data.coordinates, topology=topology,
time=data.time, unitcell_lengths=data.cell_lengths,
unitcell_angles=data.cell_angles)
if filename.endswith('.lh5'):
if 'top' in kwargs:
warnings.warn('top= kwarg ignored since file contains topology information')
with LH5TrajectoryFile(filename) as f:
if atom_indices is None:
topology = f.topology
else:
topology = f.topology.subset(atom_indices)
ptr = 0
while True:
xyz = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
raise StopIteration()
in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)
time = np.arange(ptr, ptr+len(xyz)*stride, stride)
ptr += len(xyz)*stride
yield Trajectory(xyz=xyz, topology=topology, time=time)
elif filename.endswith('.xtc'):
topology = _parse_topology(kwargs.get('top', None))
with XTCTrajectoryFile(filename) as f:
while True:
xyz, time, step, box = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
raise StopIteration()
in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(box, f.distance_unit, Trajectory._distance_unit, inplace=True)
trajectory = Trajectory(xyz=xyz, topology=topology, time=time)
trajectory.unitcell_vectors = box
yield trajectory
elif filename.endswith('.dcd'):
topology = _parse_topology(kwargs.get('top', None))
with DCDTrajectoryFile(filename) as f:
ptr = 0
while True:
# for reasons that I have not investigated, dcdtrajectory file chunk and stride
# together work like this method, but HDF5/XTC do not.
xyz, box_length, box_angle = f.read(chunk, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
raise StopIteration()
in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(box_length, f.distance_unit, Trajectory._distance_unit, inplace=True)
time = np.arange(ptr, ptr+len(xyz)*stride, stride)
ptr += len(xyz)*stride
yield Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=box_length,
unitcell_angles=box_angle)
else:
t = load(filename, **kwargs)
for i in range(0, len(t), chunk):
yield t[i:i+chunk]
class Trajectory(object):
"""Container object for a molecular dynamics trajectory
A Trajectory represents a collection of one or more molecular structures,
generally (but not necessarily) from a molecular dynamics trajectory. The
Trajectory stores a number of fields describing the system through time,
including the cartesian coordinates of each atoms (``xyz``), the topology
of the molecular system (``topology``), and information about the
unitcell if appropriate (``unitcell_vectors``, ``unitcell_length``,
``unitcell_angles``).
A Trajectory should generally be constructed by loading a file from disk.
Trajectories can be loaded from (and saved to) the PDB, XTC, TRR, DCD,
binpos, NetCDF or MDTraj HDF5 formats.
Trajectory supports fancy indexing, so you can extract one or more frames
from a Trajectory as a separate trajectory. For example, to form a
trajectory with every other frame, you can slice with ``traj[::2]``.
Trajectory uses the nanometer, degree & picosecond unit system.
Examples
--------
>>> # loading a trajectory
>>> import mdtraj as md
>>> md.load('trajectory.xtc', top='native.pdb')
<mdtraj.Trajectory with 1000 frames, 22 atoms at 0x1058a73d0>
>>> # slicing a trajectory
>>> t = md.load('trajectory.h5')
>>> print(t)
<mdtraj.Trajectory with 100 frames, 22 atoms>
>>> print(t[::2])
<mdtraj.Trajectory with 50 frames, 22 atoms>
>>> # calculating the average distance between two atoms
>>> import mdtraj as md
>>> import numpy as np
>>> t = md.load('trajectory.h5')
>>> np.mean(np.sqrt(np.sum((t.xyz[:, 0, :] - t.xyz[:, 21, :])**2, axis=1)))
See Also
--------
mdtraj.load : High-level function that loads files and returns an ``md.Trajectory``
Attributes
----------
n_frames : int
n_atoms : int
n_residues : int
time : np.ndarray, shape=(n_frames,)
timestep : float
topology : md.Topology
top : md.Topology
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
unitcell_vectors : {np.ndarray, shape=(n_frames, 3, 3), None}
unitcell_lengths : {np.ndarray, shape=(n_frames, 3), None}
unitcell_angles : {np.ndarray, shape=(n_frames, 3), None}
"""
# this is NOT configurable. if it's set to something else, things will break
# (thus why I make it private)
_distance_unit = 'nanometers'
@property
def topology(self):
"""Topology of the system, describing the organization of atoms into residues, bonds, etc
Returns
-------
topology : md.Topology
The topology object, describing the organization of atoms into
residues, bonds, etc
"""
return self._topology
@topology.setter
def topology(self, value):
"Set the topology of the system, describing the organization of atoms into residues, bonds, etc"
# todo: more typechecking
self._topology = value
@property
def n_frames(self):
"""Number of frames in the trajectory
Returns
-------
n_frames : int
The number of frames in the trajectory
"""
return self._xyz.shape[0]
@property
def n_atoms(self):
"""Number of atoms in the trajectory
Returns
-------
n_atoms : int
The number of atoms in the trajectory
"""
return self._xyz.shape[1]
@property
def n_residues(self):
"""Number of residues (amino acids) in the trajectory
Returns
-------
n_residues : int
The number of residues in the trajectory's topology
"""
if self.top is None:
return 0
return sum([1 for r in self.top.residues])
@property
def top(self):
"""Alias for self.topology, describing the organization of atoms into residues, bonds, etc
Returns
-------
topology : md.Topology
The topology object, describing the organization of atoms into
residues, bonds, etc
"""
return self._topology
@top.setter
def top(self, value):
"Set the topology of the system, describing the organization of atoms into residues, bonds, etc"
# todo: more typechecking
self._topology = value
@property
def timestep(self):
"""Timestep between frames, in picoseconds
Returns
-------
timestep : float
The timestep between frames, in picoseconds.
"""
if self.n_frames <= 1:
raise(ValueError("Cannot calculate timestep if trajectory has one frame."))
return self._time[1] - self._time[0]
@property
def time(self):
"""The simulation time corresponding to each frame, in picoseconds
Returns
-------
time : np.ndarray, shape=(n_frames,)
The simulation time corresponding to each frame, in picoseconds
"""
return self._time
@time.setter
def time(self, value):
"Set the simulation time corresponding to each frame, in picoseconds"
if isinstance(value, list):
value = np.array(value)
if np.isscalar(value) and self.n_frames == 1:
value = np.array([value])
elif not value.shape == (self.n_frames,):
raise ValueError('Wrong shape. Got %s, should be %s' % (value.shape,
(self.n_frames)))
self._time = value
@property
def unitcell_vectors(self):
"""The vectors that define the shape of the unit cell in each frame
Returns
-------
vectors : np.ndarray, shape(n_frames, 3, 3)
            Vectors defining the shape of the unit cell in each frame.
The semantics of this array are that the shape of the unit cell
in frame ``i`` are given by the three vectors, ``value[i, 0, :]``,
``value[i, 1, :]``, and ``value[i, 2, :]``.
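        Examples
        --------
        Sketch (the file and the number of frames are illustrative):
        >>> t = md.load('trajectory.h5')
        >>> t.unitcell_vectors.shape
        (100, 3, 3)
        >>> t.unitcell_vectors[0, 0, :]   # first box vector of frame 0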
"""
if self._unitcell_lengths is None or self._unitcell_angles is None:
return None
v1, v2, v3 = lengths_and_angles_to_box_vectors(
self._unitcell_lengths[:, 0], # a
self._unitcell_lengths[:, 1], # b
self._unitcell_lengths[:, 2], # c
self._unitcell_angles[:, 0], # alpha
self._unitcell_angles[:, 1], # beta
self._unitcell_angles[:, 2], # gamma
)
return np.swapaxes(np.dstack((v1, v2, v3)), 1, 2)
@unitcell_vectors.setter
def unitcell_vectors(self, vectors):
"""Set the three vectors that define the shape of the unit cell
Parameters
----------
        vectors : np.ndarray, shape=(n_frames, 3, 3), or None
The semantics of this array are that the shape of the unit cell
in frame ``i`` are given by the three vectors, ``value[i, 0, :]``,
``value[i, 1, :]``, and ``value[i, 2, :]``.
"""
if vectors is None:
self._unitcell_lengths = None
self._unitcell_angles = None
return
if not len(vectors) == len(self):
raise TypeError('unitcell_vectors must be the same length as '
'the trajectory. you provided %s' % str(vectors))
v1 = vectors[:, 0, :]
v2 = vectors[:, 1, :]
v3 = vectors[:, 2, :]
a, b, c, alpha, beta, gamma = box_vectors_to_lengths_and_angles(v1, v2, v3)
self._unitcell_lengths = np.vstack((a, b, c)).T
self._unitcell_angles = np.vstack((alpha, beta, gamma)).T
@property
def unitcell_lengths(self):
"""Lengths that define the shape of the unit cell in each frame.
Returns
-------
lengths : {np.ndarray, shape=(n_frames, 3), None}
Lengths of the unit cell in each frame, in nanometers, or None
if the Trajectory contains no unitcell information.
"""
return self._unitcell_lengths
@property
def unitcell_angles(self):
"""Angles that define the shape of the unit cell in each frame.
Returns
-------
lengths : np.ndarray, shape=(n_frames, 3)
The angles between the three unitcell vectors in each frame,
            ``alpha``, ``beta``, and ``gamma``. ``alpha`` gives the angle
between vectors ``b`` and ``c``, ``beta`` gives the angle between
vectors ``c`` and ``a``, and ``gamma`` gives the angle between
vectors ``a`` and ``b``. The angles are in degrees.
"""
return self._unitcell_angles
@unitcell_lengths.setter
def unitcell_lengths(self, value):
"""Set the lengths that define the shape of the unit cell in each frame
Parameters
----------
value : np.ndarray, shape=(n_frames, 3)
The distances ``a``, ``b``, and ``c`` that define the shape of the
unit cell in each frame, or None
"""
self._unitcell_lengths = ensure_type(value, np.float32, 2,
'unitcell_lengths', can_be_none=True, shape=(len(self), 3),
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
@unitcell_angles.setter
def unitcell_angles(self, value):
"""Set the lengths that define the shape of the unit cell in each frame
Parameters
----------
value : np.ndarray, shape=(n_frames, 3)
The angles ``alpha``, ``beta`` and ``gamma`` that define the
shape of the unit cell in each frame. The angles should be in
degrees.
"""
self._unitcell_angles = ensure_type(value, np.float32, 2,
'unitcell_angles', can_be_none=True, shape=(len(self), 3),
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
@property
def xyz(self):
"""Cartesian coordinates of each atom in each simulation frame
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
A three dimensional numpy array, with the cartesian coordinates
of each atoms in each frame.
"""
return self._xyz
@xyz.setter
def xyz(self, value):
"Set the cartesian coordinates of each atom in each simulation frame"
if self.top is not None:
# if we have a topology and its not None
shape = (None, self.topology._numAtoms, 3)
else:
shape = (None, None, 3)
value = ensure_type(value, np.float32, 3, 'xyz', shape=shape,
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
self._xyz = value
self._rmsd_traces = None
def _string_summary_basic(self):
"""Basic summary of traj in string form."""
unitcell_str = 'and unitcells' if self._have_unitcell else 'without unitcells'
value = "mdtraj.Trajectory with %d frames, %d atoms, %d residues, %s" % (
self.n_frames, self.n_atoms, self.n_residues, unitcell_str)
return value
def __len__(self):
return self.n_frames
def __add__(self, other):
"Concatenate two trajectories"
return self.join(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
# def describe(self):
# """Diagnostic summary statistics on the trajectory"""
# # What information do we want to display?
# # Goals: easy to figure out if a trajectory is blowing up or contains
    #     # bad data, easy to diagnose other problems. Generally give a
    #     # high-level description of the data in the trajectory.
    #     # Possibly show std. dev. of different coordinates in the trajectory
# # or maybe its RMSD drift or something?
# # Also, check for any NaNs or Infs in the data. Or other common issues
# # like that?
# # Note that pandas.DataFrame has a describe() method, which gives
# # min/max/mean/std.dev./percentiles of each column in a DataFrame.
# raise NotImplementedError()
def superpose(self, reference, frame=0, atom_indices=None, parallel=True):
"""Superpose each conformation in this trajectory upon a reference
Parameters
----------
reference : md.Trajectory
For each conformation in this trajectory, aligned to a particular
reference conformation in another trajectory object.
frame : int
The index of the conformation in `reference` to align to.
atom_indices : array_like, or None
The indices of the atoms to superpose. If not
supplied, all atoms will be used.
parallel : bool
Use OpenMP to run the superposition in parallel over multiple cores
Returns
-------
self
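        Examples
        --------
        A minimal sketch (file names are hypothetical):
        >>> import mdtraj as md
        >>> traj = md.load('trajectory.h5')
        >>> ref = md.load('native.h5')
        >>> traj.superpose(ref, frame=0)   # align every frame to ref's frame 0
        >>> traj.superpose(ref, frame=0, atom_indices=range(10))  # align on the first 10 atoms only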
"""
if atom_indices is None:
atom_indices = slice(None)
n_frames = self.xyz.shape[0]
self_align_xyz = np.asarray(self.xyz[:, atom_indices, :], order='c')
self_displace_xyz = np.asarray(self.xyz, order='c')
ref_align_xyz = np.array(reference.xyz[frame, atom_indices, :], copy=True, order='c').reshape(1, -1, 3)
offset = np.mean(self_align_xyz, axis=1, dtype=np.float64).reshape(n_frames, 1, 3)
self_align_xyz -= offset
if self_align_xyz.ctypes.data != self_displace_xyz.ctypes.data:
# when atom_indices is None, these two arrays alias the same memory
# so we only need to do the centering once
self_displace_xyz -= offset
ref_offset = ref_align_xyz[0].astype('float64').mean(0)
ref_align_xyz[0] -= ref_offset
self_g = np.einsum('ijk,ijk->i', self_align_xyz, self_align_xyz)
ref_g = np.einsum('ijk,ijk->i', ref_align_xyz , ref_align_xyz)
_rmsd.superpose_atom_major(
ref_align_xyz, self_align_xyz, ref_g, self_g, self_displace_xyz,
0, parallel=parallel)
self.xyz = self_displace_xyz + ref_offset
return self
def join(self, other, check_topology=True, discard_overlapping_frames=False):
"""Join two trajectories together along the time/frame axis.
This method joins trajectories along the time axis, giving a new trajectory
of length equal to the sum of the lengths of `self` and `other`.
It can also be called by using `self + other`
Parameters
----------
other : Trajectory or list of Trajectory
One or more trajectories to join with this one. These trajectories
are *appended* to the end of this trajectory.
check_topology : bool
Ensure that the topology of `self` and `other` are identical before
joining them. If false, the resulting trajectory will have the
topology of `self`.
discard_overlapping_frames : bool, optional
If True, compare coordinates at trajectory edges to discard overlapping
frames. Default: False.
See Also
--------
stack : join two trajectories along the atom axis
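        Examples
        --------
        Illustrative only; the files are hypothetical:
        >>> t1 = md.load('part1.h5')
        >>> t2 = md.load('part2.h5')
        >>> combined = t1.join(t2)        # same as t1 + t2
        >>> combined.n_frames == t1.n_frames + t2.n_frames
        True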
"""
if isinstance(other, Trajectory):
other = [other]
if isinstance(other, list):
if not all(isinstance(o, Trajectory) for o in other):
raise TypeError('You can only join Trajectory instances')
if not all(self.n_atoms == o.n_atoms for o in other):
raise ValueError('Number of atoms in self (%d) is not equal '
'to number of atoms in other' % (self.n_atoms))
if check_topology and not all(self.topology == o.topology for o in other):
raise ValueError('The topologies of the Trajectories are not the same')
if not all(self._have_unitcell == o._have_unitcell for o in other):
raise ValueError('Mixing trajectories with and without unitcell')
else:
            raise TypeError('`other` must be a list of Trajectory. You supplied %s' % type(other))
# list containing all of the trajs to merge, including self
trajectories = [self] + other
if discard_overlapping_frames:
for i in range(len(trajectories)-1):
# last frame of trajectory i
x0 = trajectories[i].xyz[-1]
# first frame of trajectory i+1
x1 = trajectories[i + 1].xyz[0]
# check that all atoms are within 2e-3 nm
# (this is kind of arbitrary)
if np.all(np.abs(x1 - x0) < 2e-3):
trajectories[i] = trajectories[i][:-1]
xyz = np.concatenate([t.xyz for t in trajectories])
time = np.concatenate([t.time for t in trajectories])
angles = lengths = None
if self._have_unitcell:
angles = np.concatenate([t.unitcell_angles for t in trajectories])
lengths = np.concatenate([t.unitcell_lengths for t in trajectories])
# use this syntax so that if you subclass Trajectory,
# the subclass's join() will return an instance of the subclass
return self.__class__(xyz, deepcopy(self._topology), time=time,
unitcell_lengths=lengths, unitcell_angles=angles)
def stack(self, other):
"""Stack two trajectories along the atom axis
This method joins trajectories along the atom axis, giving a new trajectory
with a number of atoms equal to the sum of the number of atoms in
`self` and `other`.
Notes
-----
The resulting trajectory will have the unitcell and time information
        of the left operand.
Examples
--------
>>> t1 = md.load('traj1.h5')
>>> t2 = md.load('traj2.h5')
>>> # even when t2 contains no unitcell information
>>> t2.unitcell_vectors = None
>>> stacked = t1.stack(t2)
>>> # the stacked trajectory inherits the unitcell information
>>> # from the first trajectory
>>> np.all(stacked.unitcell_vectors == t1.unitcell_vectors)
True
Parameters
----------
other : Trajectory
The other trajectory to join
See Also
--------
join : join two trajectories along the time/frame axis.
"""
if not isinstance(other, Trajectory):
raise TypeError('You can only stack two Trajectory instances')
if self.n_frames != other.n_frames:
raise ValueError('Number of frames in self (%d) is not equal '
'to number of frames in other (%d)' % (self.n_frames, other.n_frames))
if self.topology is not None:
topology = self.topology.join(other.topology)
else:
topology = None
xyz = np.hstack((self.xyz, other.xyz))
return self.__class__(xyz=xyz, topology=topology, unitcell_angles=self.unitcell_angles,
unitcell_lengths=self.unitcell_lengths, time=self.time)
def __getitem__(self, key):
"Get a slice of this trajectory"
return self.slice(key)
def slice(self, key, copy=True):
"""Slice trajectory, by extracting one or more frames into a separate object
This method can also be called using index bracket notation, i.e
`traj[1] == traj.slice(1)`
Parameters
----------
key : {int, np.ndarray, slice}
The slice to take. Can be either an int, a list of ints, or a slice
object.
copy : bool, default=True
Copy the arrays after slicing. If you set this to false, then if
you modify a slice, you'll modify the original array since they
point to the same data.
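        Examples
        --------
        A sketch of equivalent forms (the file is hypothetical):
        >>> t = md.load('trajectory.h5')
        >>> first = t.slice(0)                      # same as t[0]
        >>> evens = t.slice(slice(None, None, 2))   # same as t[::2]
        >>> view = t.slice(range(10), copy=False)   # shares memory with t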
"""
xyz = self.xyz[key]
time = self.time[key]
unitcell_lengths, unitcell_angles = None, None
if self.unitcell_angles is not None:
unitcell_angles = self.unitcell_angles[key]
if self.unitcell_lengths is not None:
unitcell_lengths = self.unitcell_lengths[key]
if copy:
xyz = xyz.copy()
time = time.copy()
topology = deepcopy(self._topology)
if self.unitcell_angles is not None:
unitcell_angles = unitcell_angles.copy()
if self.unitcell_lengths is not None:
unitcell_lengths = unitcell_lengths.copy()
newtraj = self.__class__(xyz, topology, time, unitcell_lengths=unitcell_lengths,
unitcell_angles=unitcell_angles)
return newtraj
def __init__(self, xyz, topology, time=None, unitcell_lengths=None, unitcell_angles=None):
# install the topology into the object first, so that when setting
# the xyz, we can check that it lines up (e.g. n_atoms), with the topology
self.topology = topology
self.xyz = xyz
# _rmsd_traces are the inner product of each centered conformation,
# which are required for computing RMSD. Normally these values are
# calculated on the fly in the cython code (rmsd/_rmsd.pyx), but
        # optionally, we enable the use of precomputed values which can speed
        # up the calculation (useful for clustering), but potentially be unsafe
        # if self._xyz is modified without a corresponding change to
        # self._rmsd_traces. This array is populated by
        # center_coordinates, and no other methods should really touch it.
self._rmsd_traces = None
# box has no default, it'll just be none normally
self.unitcell_lengths = unitcell_lengths
self.unitcell_angles = unitcell_angles
# time will take the default 1..N
if time is None:
time = np.arange(len(self.xyz))
self.time = time
if (topology is not None) and (topology._numAtoms != self.n_atoms):
raise ValueError("Number of atoms in xyz (%s) and "
"in topology (%s) don't match" % (self.n_atoms, topology._numAtoms))
def openmm_positions(self, frame):
"""OpenMM-compatable positions of a single frame.
Examples
--------
>>> t = md.load('trajectory.h5')
>>> context.setPositions(t.openmm_positions(0))
Parameters
----------
frame : int
The index of frame of the trajectory that you wish to extract
Returns
-------
positions : list
The cartesian coordinates of specific trajectory frame, formatted
for input to OpenMM
"""
from simtk.openmm import Vec3
from simtk.unit import nanometer
Pos = []
for xyzi in self.xyz[frame]:
Pos.append(Vec3(xyzi[0], xyzi[1], xyzi[2]))
return Pos * nanometer
def openmm_boxes(self, frame):
"""OpenMM-compatable box vectors of a single frame.
Examples
--------
>>> t = md.load('trajectory.h5')
        >>> context.setPeriodicBoxVectors(*t.openmm_boxes(0))
Parameters
----------
frame : int
Return box for this single frame.
Returns
-------
box : tuple
The periodic box vectors for this frame, formatted for input to
OpenMM.
"""
from simtk.openmm import Vec3
from simtk.unit import nanometer
vectors = self[frame].unitcell_vectors
if vectors is None:
raise ValueError("this trajectory does not contain box size information")
        v1, v2, v3 = vectors[0]  # self[frame] keeps a leading frame axis, so vectors has shape (1, 3, 3)
return (Vec3(*v1), Vec3(*v2), Vec3(*v3)) * nanometer
@staticmethod
# im not really sure if the load function should be just a function or a method on the class
# so effectively, lets make it both?
def load(filenames, **kwargs):
"""Load a trajectory from disk
Parameters
----------
filenames : {str, [str]}
Either a string or list of strings
Other Parameters
----------------
As requested by the various load functions -- it depends on the extension
"""
return load(filenames, **kwargs)
def save(self, filename, **kwargs):
"""Save trajectory to disk, in a format determined by the filename extension
Parameters
----------
filename : str
filesystem path in which to save the trajectory. The extension will
be parsed and will control the format.
Other Parameters
----------------
lossy : bool
For .h5 or .lh5, whether or not to use compression.
no_models: bool
For .pdb. TODO: Document this?
force_overwrite : bool
For .binpos, .xtc, .dcd. If `filename` already exists, overwrite it.
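        Examples
        --------
        The output format follows the extension (paths are hypothetical):
        >>> t = md.load('trajectory.h5')
        >>> t.save('out.xtc')   # dispatches to save_xtc
        >>> t.save('out.pdb')   # dispatches to save_pdb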
"""
# grab the extension of the filename
extension = os.path.splitext(filename)[1]
savers = {'.xtc': self.save_xtc,
'.trr': self.save_trr,
'.pdb': self.save_pdb,
'.dcd': self.save_dcd,
'.h5': self.save_hdf5,
'.binpos': self.save_binpos,
'.nc': self.save_netcdf,
'.netcdf': self.save_netcdf,
'.crd': self.save_mdcrd,
'.mdcrd': self.save_mdcrd,
'.ncdf': self.save_netcdf,
'.lh5': self.save_lh5,
}
try:
saver = savers[extension]
except KeyError:
raise IOError('Sorry, no saver for filename=%s (extension=%s) '
'was found. I can only save files '
'with extensions in %s' % (filename, extension, savers.keys()))
# run the saver, and return whatever output it gives
return saver(filename, **kwargs)
def save_hdf5(self, filename, force_overwrite=True):
"""Save trajectory to MDTraj HDF5 format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
        with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(coordinates=self.xyz, time=self.time,
cell_angles=self.unitcell_angles,
cell_lengths=self.unitcell_lengths)
f.topology = self.topology
def save_pdb(self, filename, force_overwrite=True):
"""Save trajectory to RCSB PDB format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with PDBTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
for i in xrange(self.n_frames):
if self._have_unitcell:
f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit),
self.topology,
modelIndex=i,
unitcell_lengths=in_units_of(self.unitcell_lengths[i], Trajectory._distance_unit, f.distance_unit),
unitcell_angles=self.unitcell_angles[i])
else:
f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit),
self.topology,
modelIndex=i)
def save_xtc(self, filename, force_overwrite=True):
"""Save trajectory to Gromacs XTC format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
with XTCTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=self.xyz, time=self.time, box=self.unitcell_vectors)
def save_trr(self, filename, force_overwrite=True):
"""Save trajectory to Gromacs TRR format
Notes
-----
Only the xyz coordinates and the time are saved, the velocities
and forces in the trr will be zeros
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
with TRRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(xyz=self.xyz, time=self.time, box=self.unitcell_vectors)
def save_dcd(self, filename, force_overwrite=True):
"""Save trajectory to CHARMM/NAMD DCD format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with DCDTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
def save_binpos(self, filename, force_overwrite=True):
"""Save trajectory to AMBER BINPOS format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
with BINPOSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit))
def save_mdcrd(self, filename, force_overwrite=True):
"""Save trajectory to AMBER mdcrd format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
if self._have_unitcell:
if not np.all(self.unitcell_angles == 90):
raise ValueError('Only rectilinear boxes can be saved to mdcrd files')
with MDCRDTrajectoryFile(filename, mode='w', force_overwrite=force_overwrite) as f:
f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),
in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit))
def save_netcdf(self, filename, force_overwrite=True):
"""Save trajectory in AMBER NetCDF format
Parameters
----------
filename : str
filesystem path in which to save the trajectory
force_overwrite : bool, default=True
            Overwrite anything that exists at filename, if it's already there
"""
self._check_valid_unitcell()
with NetCDFTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:
f.write(coordinates=in_units_of(self._xyz, Trajectory._distance_unit, NetCDFTrajectoryFile.distance_unit),
time=self.time,
cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),
cell_angles=self.unitcell_angles)
def save_lh5(self, filename):
"""Save trajectory in deprecated MSMBuilder2 LH5 (lossy HDF5) format.
Parameters
----------
filename : str
filesystem path in which to save the trajectory
"""
with LH5TrajectoryFile(filename, 'w', force_overwrite=True) as f:
f.write(coordinates=self.xyz)
f.topology = self.topology
def center_coordinates(self, mass_weighted=False):
"""Center each trajectory frame at the origin (0,0,0).
This method acts inplace on the trajectory. The centering can
be either uniformly weighted (mass_weighted=False) or weighted by
the mass of each atom (mass_weighted=True).
Parameters
----------
mass_weighted : bool, optional (default = False)
If True, weight atoms by mass when removing COM.
Returns
-------
self
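        Examples
        --------
        Illustrative only:
        >>> t = md.load('trajectory.h5')
        >>> t.center_coordinates()                    # uniform (geometric) centering
        >>> t.center_coordinates(mass_weighted=True)  # remove the center of mass instead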
"""
if mass_weighted and self.top is not None:
masses = np.array([a.element.mass for a in self.top.atoms])
masses /= masses.sum()
for x in self._xyz:
x -= (x.astype('float64').T.dot(masses))
else:
self._rmsd_traces = _rmsd._center_inplace_atom_major(self._xyz)
return self
def restrict_atoms(self, atom_indices):
"""Retain only a subset of the atoms in a trajectory (inplace)
Deletes atoms not in `atom_indices`, and re-indexes those that remain
Parameters
----------
atom_indices : list([int])
List of atom indices to keep.
Returns
-------
self
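        Examples
        --------
        Keep only the first 100 atoms (the cutoff is arbitrary, for illustration):
        >>> t = md.load('trajectory.h5')
        >>> t.restrict_atoms(range(100))
        >>> t.n_atoms
        100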
"""
if self._topology is not None:
self._topology = self._topology.subset(atom_indices)
self._xyz = np.array(self.xyz[:,atom_indices], order='C')
return self
def _check_valid_unitcell(self):
"""Do some sanity checking on self.unitcell_lengths and self.unitcell_angles
"""
if self.unitcell_lengths is not None and self.unitcell_angles is None:
raise AttributeError('unitcell length data exists, but no angles')
if self.unitcell_lengths is None and self.unitcell_angles is not None:
raise AttributeError('unitcell angles data exists, but no lengths')
if self.unitcell_lengths is not None and np.any(self.unitcell_lengths < 0):
raise ValueError('unitcell length < 0')
if self.unitcell_angles is not None and np.any(self.unitcell_angles < 0):
raise ValueError('unitcell angle < 0')
@property
def _have_unitcell(self):
return self._unitcell_lengths is not None and self._unitcell_angles is not None
| lgpl-2.1 |
SergioGonzalezSanz/conformal_predictors | tests/nc_measures/SVMTest.py | 1 | 1492 | import unittest
from conformal_predictors.nc_measures.SVM import SVCDistanceNCMeasure
from sklearn.svm import SVC
from numpy import array
class SVMTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1(self):
x = array([[1, 1], [2, 2]])
y = array([0, 1])
measure = SVCDistanceNCMeasure()
clf = SVC(decision_function_shape='ovr')
clf.fit(x, y)
measures = measure.evaluate(clf, x)
self.assertAlmostEqual(measures[0, 0], -.63212056)
self.assertAlmostEqual(measures[0, 1], .63212056)
self.assertAlmostEqual(measures[1, 0], .63212056)
self.assertAlmostEqual(measures[1, 1], -.63212056)
def tests_2(self):
x = array([[1, 1], [2, 2], [3, 3]])
y = array([0, 1, 2])
measure = SVCDistanceNCMeasure()
clf = SVC(decision_function_shape='ovr')
clf.fit(x, y)
measures = measure.evaluate(clf, x)
self.assertAlmostEqual(measures[0, 0], -1.5)
self.assertAlmostEqual(measures[0, 1], 1.08754365)
self.assertAlmostEqual(measures[0, 2], .41245635)
self.assertAlmostEqual(measures[1, 0], 1.19584788)
self.assertAlmostEqual(measures[1, 1], -1.60830423)
self.assertAlmostEqual(measures[1, 2], .19584788)
self.assertAlmostEqual(measures[2, 0], .41245635)
self.assertAlmostEqual(measures[2, 1], 1.08754365)
self.assertAlmostEqual(measures[2, 2], -1.5)
| mit |
altairpearl/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 25 | 11187 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
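    # a (p_h, p_w) window sliding one pixel at a time fits at
    # (i_h - p_h + 1) * (i_w - p_w + 1) distinct positions in the image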
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
aewhatley/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
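# Editorial addition (hedged example, not in the original script): once the
# model is fit, the same classifier can score and label new points. The
# `new_points` array below is made up purely for demonstration.
new_points = np.array([[0.5, 2.0], [3.0, 0.5]])
print("predicted labels:", clf.predict(new_points))
print("signed distances to the hyperplane:", clf.decision_function(new_points))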
| bsd-3-clause |
bmcfee/librosa | librosa/util/utils.py | 1 | 64787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions"""
import warnings
import scipy.ndimage
import scipy.sparse
import numpy as np
import numba
from numpy.lib.stride_tricks import as_strided
from .._cache import cache
from .exceptions import ParameterError
# Constrain STFT block sizes to 256 KB
MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10
__all__ = [
"MAX_MEM_BLOCK",
"frame",
"pad_center",
"fix_length",
"valid_audio",
"valid_int",
"valid_intervals",
"fix_frames",
"axis_sort",
"localmax",
"localmin",
"normalize",
"peak_pick",
"sparsify_rows",
"shear",
"stack",
"fill_off_diagonal",
"index_to_slice",
"sync",
"softmask",
"buf_to_float",
"tiny",
"cyclic_gradient",
"dtype_r2c",
"dtype_c2r",
]
def frame(x, frame_length, hop_length, axis=-1):
"""Slice a data array into (overlapping) frames.
This implementation uses low-level stride manipulation to avoid
making a copy of the data. The resulting frame representation
is a new view of the same input data.
However, if the input data is not contiguous in memory, a warning
will be issued and the output will be a full copy, rather than
a view of the input data.
For example, a one-dimensional input ``x = [0, 1, 2, 3, 4, 5, 6]``
can be framed with frame length 3 and hop length 2 in two ways.
The first (``axis=-1``), results in the array ``x_frames``::
[[0, 2, 4],
[1, 3, 5],
[2, 4, 6]]
where each column ``x_frames[:, i]`` contains a contiguous slice of
the input ``x[i * hop_length : i * hop_length + frame_length]``.
The second way (``axis=0``) results in the array ``x_frames``::
[[0, 1, 2],
[2, 3, 4],
[4, 5, 6]]
where each row ``x_frames[i]`` contains a contiguous slice of the input.
This generalizes to higher dimensional inputs, as shown in the examples below.
In general, the framing operation increments by 1 the number of dimensions,
adding a new "frame axis" either to the end of the array (``axis=-1``)
or the beginning of the array (``axis=0``).
Parameters
----------
x : np.ndarray
Array to frame
frame_length : int > 0 [scalar]
Length of the frame
hop_length : int > 0 [scalar]
Number of steps to advance between frames
axis : 0 or -1
The axis along which to frame.
If ``axis=-1`` (the default), then ``x`` is framed along its last dimension.
``x`` must be "F-contiguous" in this case.
If ``axis=0``, then ``x`` is framed along its first dimension.
``x`` must be "C-contiguous" in this case.
Returns
-------
x_frames : np.ndarray [shape=(..., frame_length, N_FRAMES) or (N_FRAMES, frame_length, ...)]
A framed view of ``x``, for example with ``axis=-1`` (framing on the last dimension)::
x_frames[..., j] == x[..., j * hop_length : j * hop_length + frame_length]
If ``axis=0`` (framing on the first dimension), then::
x_frames[j] = x[j * hop_length : j * hop_length + frame_length]
Raises
------
ParameterError
If ``x`` is not an `np.ndarray`.
If ``x.shape[axis] < frame_length``, there is not enough data to fill one frame.
If ``hop_length < 1``, frames cannot advance.
If ``axis`` is not 0 or -1. Framing is only supported along the first or last axis.
See Also
--------
numpy.asfortranarray : Convert data to F-contiguous representation
numpy.ascontiguousarray : Convert data to C-contiguous representation
numpy.ndarray.flags : information about the memory layout of a numpy `ndarray`.
Examples
--------
Extract 2048-sample frames from monophonic signal with a hop of 64 samples per frame
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
>>> frames
array([[-1.407e-03, -2.604e-02, ..., -1.795e-05, -8.108e-06],
[-4.461e-04, -3.721e-02, ..., -1.573e-05, -1.652e-05],
...,
[ 7.960e-02, -2.335e-01, ..., -6.815e-06, 1.266e-05],
[ 9.568e-02, -1.252e-01, ..., 7.397e-06, -1.921e-05]],
dtype=float32)
>>> y.shape
(117601,)
>>> frames.shape
(2048, 1806)
Or frame along the first axis instead of the last:
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64, axis=0)
>>> frames.shape
(1806, 2048)
Frame a stereo signal:
>>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False)
>>> y.shape
(2, 117601)
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
>>> frames.shape
(2, 2048, 1806)
Carve an STFT into fixed-length patches of 32 frames with 50% overlap
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> S.shape
(1025, 230)
>>> S_patch = librosa.util.frame(S, frame_length=32, hop_length=16)
>>> S_patch.shape
(1025, 32, 13)
>>> # The first patch contains the first 32 frames of S
>>> np.allclose(S_patch[:, :, 0], S[:, :32])
True
>>> # The second patch contains frames 16 to 16+32=48, and so on
>>> np.allclose(S_patch[:, :, 1], S[:, 16:48])
True
"""
if not isinstance(x, np.ndarray):
raise ParameterError(
"Input must be of type numpy.ndarray, " "given type(x)={}".format(type(x))
)
if x.shape[axis] < frame_length:
raise ParameterError(
"Input is too short (n={:d})"
" for frame_length={:d}".format(x.shape[axis], frame_length)
)
if hop_length < 1:
raise ParameterError("Invalid hop_length: {:d}".format(hop_length))
if axis == -1 and not x.flags["F_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.asfortranarray(x)
elif axis == 0 and not x.flags["C_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.ascontiguousarray(x)
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
strides = np.asarray(x.strides)
new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize
if axis == -1:
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * new_stride]
elif axis == 0:
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * new_stride] + list(strides)
else:
raise ParameterError("Frame axis={} must be either 0 or -1".format(axis))
return as_strided(x, shape=shape, strides=strides)
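# Editorial sketch (not part of librosa): a naive, copy-based reference for
# ``frame`` with ``axis=-1``, useful for sanity-checking the strided view
# above, e.g. ``np.allclose(frame(y, 2048, 64), _frame_reference(y, 2048, 64))``.
# The function name is invented for illustration only.
def _frame_reference(x, frame_length, hop_length):
    n_frames = 1 + (x.shape[-1] - frame_length) // hop_length
    return np.stack(
        [x[..., i * hop_length: i * hop_length + frame_length]
         for i in range(n_frames)],
        axis=-1,
    )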
@cache(level=20)
def valid_audio(y, mono=True):
"""Determine whether a variable contains valid audio data.
If ``mono=True``, then ``y`` is only considered valid if it has shape
``(N,)`` (number of samples).
If ``mono=False``, then ``y`` may be either monophonic, or have shape
``(2, N)`` (stereo) or ``(K, N)`` for ``K>=2`` for general multi-channel.
Parameters
----------
y : np.ndarray
The input data to validate
mono : bool
Whether or not to require monophonic audio
Returns
-------
valid : bool
True if all tests pass
Raises
------
ParameterError
In any of these cases:
- ``type(y)`` is not ``np.ndarray``
- ``y.dtype`` is not floating-point
- ``mono == True`` and ``y.ndim`` is not 1
- ``mono == False`` and ``y.ndim`` is not 1 or 2
- ``mono == False`` and ``y.ndim == 2`` but ``y.shape[0] == 1``
- ``np.isfinite(y).all()`` is False
Notes
-----
This function caches at level 20.
Examples
--------
>>> # By default, valid_audio allows only mono signals
>>> filepath = librosa.ex('trumpet', hq=True)
>>> y_mono, sr = librosa.load(filepath, mono=True)
>>> y_stereo, _ = librosa.load(filepath, mono=False)
>>> librosa.util.valid_audio(y_mono), librosa.util.valid_audio(y_stereo)
True, False
>>> # To allow stereo signals, set mono=False
>>> librosa.util.valid_audio(y_stereo, mono=False)
True
See also
--------
numpy.float32
"""
if not isinstance(y, np.ndarray):
raise ParameterError("Audio data must be of type numpy.ndarray")
if not np.issubdtype(y.dtype, np.floating):
raise ParameterError("Audio data must be floating-point")
if mono and y.ndim != 1:
raise ParameterError(
"Invalid shape for monophonic audio: "
"ndim={:d}, shape={}".format(y.ndim, y.shape)
)
elif y.ndim > 2 or y.ndim == 0:
raise ParameterError(
"Audio data must have shape (samples,) or (channels, samples). "
"Received shape={}".format(y.shape)
)
elif y.ndim == 2 and y.shape[0] < 2:
raise ParameterError(
"Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
)
if not np.isfinite(y).all():
raise ParameterError("Audio buffer is not finite everywhere")
return True
def valid_int(x, cast=None):
"""Ensure that an input value is integer-typed.
This is primarily useful for ensuring integer-valued
array indices.
Parameters
----------
x : number
A scalar value to be cast to int
cast : function [optional]
A function to modify ``x`` before casting.
Default: `np.floor`
Returns
-------
x_int : int
``x_int = int(cast(x))``
Raises
------
ParameterError
If ``cast`` is provided and is not callable.
"""
if cast is None:
cast = np.floor
if not callable(cast):
raise ParameterError("cast parameter must be callable")
return int(cast(x))
def valid_intervals(intervals):
"""Ensure that an array is a valid representation of time intervals:
- intervals.ndim == 2
- intervals.shape[1] == 2
- intervals[i, 0] <= intervals[i, 1] for all i
Parameters
----------
intervals : np.ndarray [shape=(n, 2)]
set of time intervals
Returns
-------
valid : bool
True if ``intervals`` passes validation.
"""
if intervals.ndim != 2 or intervals.shape[-1] != 2:
raise ParameterError("intervals must have shape (n, 2)")
if np.any(intervals[:, 0] > intervals[:, 1]):
raise ParameterError(
"intervals={} must have non-negative durations".format(intervals)
)
return True
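# Editorial examples (not in the original docstring), assuming standard numpy
# input:
#
#   >>> valid_intervals(np.array([[0.0, 1.0], [1.0, 2.5]]))
#   True
#   >>> valid_intervals(np.array([[1.0, 0.5]]))  # start > end -> ParameterError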
def pad_center(data, size, axis=-1, **kwargs):
"""Pad an array to a target length along a target axis.
This differs from `np.pad` by centering the data prior to padding,
analogous to `str.center`
Examples
--------
>>> # Generate a vector
>>> data = np.ones(5)
>>> librosa.util.pad_center(data, 10, mode='constant')
array([ 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.])
>>> # Pad a matrix along its first dimension
>>> data = np.ones((3, 5))
>>> librosa.util.pad_center(data, 7, axis=0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # Or its second dimension
>>> librosa.util.pad_center(data, 7, axis=1)
array([[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.]])
Parameters
----------
data : np.ndarray
Vector to be padded and centered
size : int >= len(data) [scalar]
Length to pad ``data``
axis : int
Axis along which to pad and center the data
kwargs : additional keyword arguments
arguments passed to `np.pad`
Returns
-------
data_padded : np.ndarray
``data`` centered and padded to length ``size`` along the
specified axis
Raises
------
ParameterError
If ``size < data.shape[axis]``
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
lpad = int((size - n) // 2)
lengths = [(0, 0)] * data.ndim
lengths[axis] = (lpad, int(size - n - lpad))
if lpad < 0:
raise ParameterError(
("Target size ({:d}) must be " "at least input size ({:d})").format(size, n)
)
return np.pad(data, lengths, **kwargs)
def fix_length(data, size, axis=-1, **kwargs):
"""Fix the length an array ``data`` to exactly ``size`` along a target axis.
If ``data.shape[axis] < n``, pad according to the provided kwargs.
By default, ``data`` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to ``np.pad``
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
``data`` either trimmed or padded to length ``size``
along the specified axis.
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
def fix_frames(frames, x_min=0, x_max=None, pad=True):
"""Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If ``True``, then ``frames`` is expanded to span the full range
``[x_min, x_max]``
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If ``frames`` contains negative values
"""
frames = np.asarray(frames)
if np.any(frames < 0):
raise ParameterError("Negative frame index detected")
if pad and (x_min is not None or x_max is not None):
frames = np.clip(frames, x_min, x_max)
if pad:
pad_data = []
if x_min is not None:
pad_data.append(x_min)
if x_max is not None:
pad_data.append(x_max)
frames = np.concatenate((pad_data, frames))
if x_min is not None:
frames = frames[frames >= x_min]
if x_max is not None:
frames = frames[frames <= x_max]
return np.unique(frames).astype(int)
def axis_sort(S, axis=-1, index=False, value=None):
"""Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=64)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, ncols=2)
>>> img_w = librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log', ax=ax[0, 0])
>>> ax[0, 0].set(title='W')
>>> ax[0, 0].label_outer()
>>> img_act = librosa.display.specshow(H, x_axis='time', ax=ax[0, 1])
>>> ax[0, 1].set(title='H')
>>> ax[0, 1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log', ax=ax[1, 0])
>>> ax[1, 0].set(title='W sorted')
>>> librosa.display.specshow(H_sort, x_axis='time', ax=ax[1, 1])
>>> ax[1, 1].set(title='H sorted')
>>> ax[1, 1].label_outer()
>>> fig.colorbar(img_w, ax=ax[:, 0], orientation='horizontal')
>>> fig.colorbar(img_act, ax=ax[:, 1], orientation='horizontal')
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- ``axis=0`` to sort rows by peak column index
- ``axis=1`` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
``S`` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If ``index == True``, the sorting index used to permute ``S``.
Length of ``idx`` corresponds to the selected ``axis``.
Raises
------
ParameterError
If ``S`` does not have exactly 2 dimensions (``S.ndim != 2``)
"""
if value is None:
value = np.argmax
if S.ndim != 2:
raise ParameterError("axis_sort is only defined for 2D arrays")
bin_idx = value(S, axis=np.mod(1 - axis, S.ndim))
idx = np.argsort(bin_idx)
sort_slice = [slice(None)] * S.ndim
sort_slice[axis] = idx
if index:
return S[tuple(sort_slice)], idx
else:
return S[tuple(sort_slice)]
@cache(level=40)
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
"""Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that::
norm(S, axis=axis) == 1
For example, ``axis=0`` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, ``axis=1`` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
``threshold`` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
- `-np.inf` : minimum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least ``threshold`` are
normalized.
By default, the threshold is determined from
the numerical precision of ``S.dtype``.
fill : None or bool
If None, then columns (or rows) with norm below ``threshold``
are left as is.
If False, then columns (rows) with norm below ``threshold``
are set to 0.
If True, then columns (rows) with norm below ``threshold``
are filled uniformly such that the corresponding norm is 1.
.. note:: ``fill=True`` is incompatible with ``norm=0`` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If ``norm`` is not among the valid types defined above
If ``S`` is not finite
If ``fill=True`` and ``norm=0``
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
"""
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise ParameterError(
"threshold={} must be strictly " "positive".format(threshold)
)
if fill not in [None, False, True]:
raise ParameterError("fill={} must be None or boolean".format(fill))
if not np.all(np.isfinite(S)):
raise ParameterError("Input must be finite")
# All norms only depend on magnitude, let's do that first
mag = np.abs(S).astype(np.float)
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise ParameterError("Cannot normalize with norm=0 and fill=True")
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag ** norm, axis=axis, keepdims=True) ** (1.0 / norm)
if axis is None:
fill_norm = mag.size ** (-1.0 / norm)
else:
fill_norm = mag.shape[axis] ** (-1.0 / norm)
elif norm is None:
return S
else:
raise ParameterError("Unsupported norm: {}".format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
def localmax(x, axis=0):
"""Find local maxima in an array
An element ``x[i]`` is considered a local maximum if the following
conditions are met:
- ``x[i] > x[i-1]``
- ``x[i] >= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along ``axis``
See Also
--------
localmin
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)])
def localmin(x, axis=0):
"""Find local minima in an array
An element ``x[i]`` is considered a local minimum if the following
conditions are met:
- ``x[i] < x[i-1]``
- ``x[i] <= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local minimum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmin(x)
array([False, True, False, False, True, False, True, False])
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmin(x, axis=0)
array([[False, False, False],
[False, True, True],
[False, False, False]])
>>> librosa.util.localmin(x, axis=1)
array([[False, True, False],
[False, True, False],
[False, True, False]])
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local minimality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local minimality along ``axis``
See Also
--------
localmax
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x < x_pad[tuple(inds1)]) & (x <= x_pad[tuple(inds2)])
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
"""Uses a flexible heuristic to pick peaks in a signal.
A sample n is selected as a peak if the corresponding ``x[n]``
fulfills the following three conditions:
1. ``x[n] == max(x[n - pre_max:n + post_max])``
2. ``x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta``
3. ``n - previous_n > wait``
where ``previous_n`` is the last sample picked as a peak (greedily).
This implementation is based on [#]_ and [#]_.
.. [#] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [#] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
input signal to pick peaks from
pre_max : int >= 0 [scalar]
number of samples before ``n`` over which max is computed
post_max : int >= 1 [scalar]
number of samples after ``n`` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before ``n`` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after ``n`` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in ``x``
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 3, 27, 40, 61, 72, 88, 103])
>>> import matplotlib.pyplot as plt
>>> times = librosa.times_like(onset_env, sr=sr, hop_length=512)
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> D = np.abs(librosa.stft(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[1])
>>> ax[0].plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> ax[0].vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> ax[0].legend(frameon=True, framealpha=0.8)
>>> ax[0].label_outer()
"""
if pre_max < 0:
raise ParameterError("pre_max must be non-negative")
if pre_avg < 0:
raise ParameterError("pre_avg must be non-negative")
if delta < 0:
raise ParameterError("delta must be non-negative")
if wait < 0:
raise ParameterError("wait must be non-negative")
if post_max <= 0:
raise ParameterError("post_max must be positive")
if post_avg <= 0:
raise ParameterError("post_avg must be positive")
if x.ndim != 1:
raise ParameterError("input array must be one-dimensional")
# Ensure valid index types
pre_max = valid_int(pre_max, cast=np.ceil)
post_max = valid_int(post_max, cast=np.ceil)
pre_avg = valid_int(pre_avg, cast=np.ceil)
post_avg = valid_int(post_avg, cast=np.ceil)
wait = valid_int(wait, cast=np.ceil)
# Get the maximum of the signal over a sliding window
max_length = pre_max + post_max
max_origin = np.ceil(0.5 * (pre_max - post_max))
# Using mode='constant' and cval=x.min() effectively truncates
# the sliding window at the boundaries
mov_max = scipy.ndimage.filters.maximum_filter1d(
x, int(max_length), mode="constant", origin=int(max_origin), cval=x.min()
)
# Get the mean of the signal over a sliding window
avg_length = pre_avg + post_avg
avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
# Here, there is no mode which results in the behavior we want,
# so we'll correct below.
mov_avg = scipy.ndimage.filters.uniform_filter1d(
x, int(avg_length), mode="nearest", origin=int(avg_origin)
)
# Correct sliding average at the beginning
n = 0
# Only need to correct in the range where the window needs to be truncated
while n - pre_avg < 0 and n < x.shape[0]:
# This just explicitly does mean(x[n - pre_avg:n + post_avg])
# with truncation
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# Correct sliding average at the end
n = x.shape[0] - post_avg
# When post_avg > x.shape[0] (weird case), reset to 0
n = n if n > 0 else 0
while n < x.shape[0]:
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# First mask out all entries not equal to the local max
detections = x * (x == mov_max)
# Then mask out all entries less than the thresholded average
detections = detections * (detections >= (mov_avg + delta))
# Initialize peaks array, to be filled greedily
peaks = []
# Remove onsets which are close together in time
last_onset = -np.inf
for i in np.nonzero(detections)[0]:
# Only report an onset if at least "wait" samples have passed since the last one
if i > last_onset + wait:
peaks.append(i)
# Save last reported onset
last_onset = i
return np.array(peaks)
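# Editorial sketch (not part of librosa): a slow, direct transcription of the
# three peak-picking conditions above. It is intended only for understanding
# and spot-checking the vectorized implementation; the name is invented here.
def _peak_pick_reference(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
    peaks = []
    last_onset = -np.inf
    for n in range(len(x)):
        window_max = x[max(0, n - pre_max):n + post_max].max()
        window_mean = x[max(0, n - pre_avg):n + post_avg].mean()
        if (x[n] == window_max                     # 1: local maximum
                and x[n] >= window_mean + delta    # 2: exceeds mean by delta
                and n > last_onset + wait):        # 3: far enough from last peak
            peaks.append(n)
            last_onset = n
    return np.array(peaks)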
@cache(level=40)
def sparsify_rows(x, quantile=0.01, dtype=None):
"""Return a row-sparse matrix approximating the input
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of ``x``
dtype : np.dtype, optional
The dtype of the output array.
If not provided, then ``x.dtype`` will be used.
Returns
-------
x_sparse : ``scipy.sparse.csr_matrix`` [shape=x.shape]
Row-sparsified approximation of ``x``
If ``x.ndim == 1``, then ``x`` is interpreted as a row vector,
and ``x_sparse.shape == (1, len(x))``.
Raises
------
ParameterError
If ``x.ndim > 2``
If ``quantile`` lies outside ``[0, 1.0)``
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
"""
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError(
"Input must have 2 or fewer dimensions. "
"Provided x.shape={}.".format(x.shape)
)
if not 0.0 <= quantile < 1:
raise ParameterError("Invalid quantile {:.2f}".format(quantile))
if dtype is None:
dtype = x.dtype
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr()
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in ``x``
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1.0 / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = "<i{:d}".format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
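# Editorial example (not in the original docstring). Assumption: the buffer
# holds 16-bit little-endian PCM, as produced by a typical WAV reader.
def _buf_to_float_example():
    # Four samples: silence, half scale, most negative, most positive
    pcm = np.array([0, 16384, -32768, 32767], dtype="<i2").tobytes()
    # Expected result: [0.0, 0.5, -1.0, ~0.99997] as float32
    return buf_to_float(pcm, n_bytes=2)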
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
"""Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min, idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad ``idx`` to span the range ``idx_min:idx_max``.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
``idx_min`` and ``idx_max``.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
"""
# First, normalize the index set
idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad)
# Now convert the indices to slices
return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])]
@cache(level=40)
def sync(data, idx, aggregate=None, pad=True, axis=-1):
"""Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to ``idx``.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, ``idx`` is padded to span the full range ``[0, data.shape[axis]]``
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
``data_sync`` will have the same dimension as ``data``, except that the ``axis``
coordinate will be reduced according to ``idx``.
For example, a 2-dimensional ``data`` with ``axis=-1`` should satisfy::
data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.ex('choice'))
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> fig, ax = plt.subplots(nrows=3, sharex=True, sharey=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time', ax=ax[0])
>>> ax[0].set(title='CQT power, shape={}'.format(C.shape))
>>> ax[0].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time', ax=ax[1])
>>> ax[1].set(title='Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> ax[1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time', ax=ax[2])
>>> ax[2].set(title='Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
"""
if aggregate is None:
aggregate = np.mean
shape = list(data.shape)
if np.all([isinstance(_, slice) for _ in idx]):
slices = idx
elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]):
slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
else:
raise ParameterError("Invalid index set: {}".format(idx))
agg_shape = list(shape)
agg_shape[axis] = len(slices)
data_agg = np.empty(
agg_shape, order="F" if np.isfortran(data) else "C", dtype=data.dtype
)
idx_in = [slice(None)] * data.ndim
idx_agg = [slice(None)] * data_agg.ndim
for (i, segment) in enumerate(slices):
idx_in[axis] = segment
idx_agg[axis] = i
data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis)
return data_agg
def softmask(X, X_ref, power=1, split_zeros=False):
"""Robustly compute a soft-mask operation.
``M = X**power / (X**power + X_ref**power)``
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as ``X``.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to ``X > X_ref``.
Note: for hard masks, ties are always broken in favor of ``X_ref`` (``mask=0``).
split_zeros : bool
If `True`, entries where ``X`` and ``X_ref`` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=X.shape
The output mask array
Raises
------
ParameterError
If ``X`` and ``X_ref`` have different shapes.
If ``X`` or ``X_ref`` are negative anywhere
If ``power <= 0``
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool)
"""
if X.shape != X_ref.shape:
raise ParameterError("Shape mismatch: {}!={}".format(X.shape, X_ref.shape))
if np.any(X < 0) or np.any(X_ref < 0):
raise ParameterError("X and X_ref must be non-negative")
if power <= 0:
raise ParameterError("power must be strictly positive")
# We're working with ints, cast to float.
dtype = X.dtype
if not np.issubdtype(dtype, np.floating):
dtype = np.float32
# Re-scale the input arrays relative to the larger value
Z = np.maximum(X, X_ref).astype(dtype)
bad_idx = Z < np.finfo(dtype).tiny
Z[bad_idx] = 1
# For finite power, compute the softmask
if np.isfinite(power):
mask = (X / Z) ** power
ref_mask = (X_ref / Z) ** power
good_idx = ~bad_idx
mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
# Wherever energy is below energy in both inputs, split the mask
if split_zeros:
mask[bad_idx] = 0.5
else:
mask[bad_idx] = 0.0
else:
# Otherwise, compute the hard mask
mask = X > X_ref
return mask
def tiny(x):
"""Compute the tiny-value corresponding to an input's data type.
This is the smallest "usable" number representable in ``x.dtype``
(e.g., float32).
This is primarily useful for determining a threshold for
numerical underflow in division or multiplication operations.
Parameters
----------
x : number or np.ndarray
The array to compute the tiny-value for.
All that matters here is ``x.dtype``
Returns
-------
tiny_value : float
The smallest positive usable number for the type of ``x``.
If ``x`` is integer-typed, then the tiny value for ``np.float32``
is returned instead.
See Also
--------
numpy.finfo
Examples
--------
For a standard double-precision floating point number:
>>> librosa.util.tiny(1.0)
2.2250738585072014e-308
Or explicitly as double-precision
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64))
2.2250738585072014e-308
Or complex numbers
>>> librosa.util.tiny(1j)
2.2250738585072014e-308
Single-precision floating point:
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32))
1.1754944e-38
Integer
>>> librosa.util.tiny(5)
1.1754944e-38
"""
# Make sure we have an array view
x = np.asarray(x)
# Only floating types generate a tiny
if np.issubdtype(x.dtype, np.floating) or np.issubdtype(
x.dtype, np.complexfloating
):
dtype = x.dtype
else:
dtype = np.float32
return np.finfo(dtype).tiny
def fill_off_diagonal(x, radius, value=0):
"""Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
When ``x.shape[0] != x.shape[1]``, the radius will be
expanded so that ``x[-1, -1] = 1`` always.
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
"""
nx, ny = x.shape
# Calculate the radius in indices, rather than proportion
radius = np.round(radius * np.min(x.shape))
nx, ny = x.shape
offset = np.abs((x.shape[0] - x.shape[1]))
if nx < ny:
idx_u = np.triu_indices_from(x, k=radius + offset)
idx_l = np.tril_indices_from(x, k=-radius)
else:
idx_u = np.triu_indices_from(x, k=radius)
idx_l = np.tril_indices_from(x, k=-radius - offset)
# modify input matrix
x[idx_u] = value
x[idx_l] = value
def cyclic_gradient(data, edge_order=1, axis=-1):
"""Estimate the gradient of a function over a uniformly sampled,
periodic domain.
This is essentially the same as `np.gradient`, except that edge effects
are handled by wrapping the observations (i.e. assuming periodicity)
rather than extrapolation.
Parameters
----------
data : np.ndarray
The function values observed at uniformly spaced positions on
a periodic domain
edge_order : {1, 2}
The order of the difference approximation used for estimating
the gradient
axis : int
The axis along which gradients are calculated.
Returns
-------
grad : np.ndarray like ``data``
The gradient of ``data`` taken along the specified axis.
See Also
--------
numpy.gradient
Examples
--------
This example estimates the gradient of cosine (-sine) from 64
samples using direct (aperiodic) and periodic gradient
calculation.
>>> import matplotlib.pyplot as plt
>>> x = 2 * np.pi * np.linspace(0, 1, num=64, endpoint=False)
>>> y = np.cos(x)
>>> grad = np.gradient(y)
>>> cyclic_grad = librosa.util.cyclic_gradient(y)
>>> true_grad = -np.sin(x) * 2 * np.pi / len(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, true_grad, label='True gradient', linewidth=5,
... alpha=0.35)
>>> ax.plot(x, cyclic_grad, label='cyclic_gradient')
>>> ax.plot(x, grad, label='np.gradient', linestyle=':')
>>> ax.legend()
>>> # Zoom into the first part of the sequence
>>> ax.set(xlim=[0, np.pi/16], ylim=[-0.025, 0.025])
"""
# Wrap-pad the data along the target axis by `edge_order` on each side
padding = [(0, 0)] * data.ndim
padding[axis] = (edge_order, edge_order)
data_pad = np.pad(data, padding, mode="wrap")
# Compute the gradient
grad = np.gradient(data_pad, edge_order=edge_order, axis=axis)
# Remove the padding
slices = [slice(None)] * data.ndim
slices[axis] = slice(edge_order, -edge_order)
return grad[tuple(slices)]
@numba.jit(nopython=True, cache=True)
def __shear_dense(X, factor=+1, axis=-1):
"""Numba-accelerated shear for dense (ndarray) arrays"""
if axis == 0:
X = X.T
X_shear = np.empty_like(X)
for i in range(X.shape[1]):
X_shear[:, i] = np.roll(X[:, i], factor * i)
if axis == 0:
X_shear = X_shear.T
return X_shear
def __shear_sparse(X, factor=+1, axis=-1):
"""Fast shearing for sparse matrices
Shearing is performed using CSC array indices,
and the result is converted back to whatever sparse format
the data was originally provided in.
"""
fmt = X.format
if axis == 0:
X = X.T
# Now we're definitely rolling on the correct axis
X_shear = X.tocsc(copy=True)
# The idea here is to repeat the shear amount (factor * range)
# by the number of non-zeros for each column.
# The number of non-zeros is computed by diffing the index pointer array
roll = np.repeat(factor * np.arange(X_shear.shape[1]), np.diff(X_shear.indptr))
# In-place roll
np.mod(X_shear.indices + roll, X_shear.shape[0], out=X_shear.indices)
if axis == 0:
X_shear = X_shear.T
# And convert back to the input format
return X_shear.asformat(fmt)
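# Editorial check (not part of librosa): the dense and sparse shear paths are
# expected to agree; this small helper illustrates that equivalence on a toy
# matrix. The function name is invented for illustration only.
def _check_shear_agreement():
    X = np.eye(4)
    dense = __shear_dense(X, factor=1, axis=-1)
    sparse = __shear_sparse(scipy.sparse.csr_matrix(X), factor=1, axis=-1)
    return np.allclose(dense, sparse.toarray())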
def shear(X, factor=1, axis=-1):
"""Shear a matrix by a given factor.
The column ``X[:, n]`` will be displaced (rolled)
by ``factor * n``
This is primarily useful for converting between lag and recurrence
representations: shearing with ``factor=-1`` converts the main diagonal
to a horizontal. Shearing with ``factor=1`` converts a horizontal to
a diagonal.
Parameters
----------
X : np.ndarray [ndim=2] or scipy.sparse matrix
The array to be sheared
factor : integer
The shear factor: ``X[:, n] -> np.roll(X[:, n], factor * n)``
axis : integer
The axis along which to shear
Returns
-------
X_shear : same type as ``X``
The sheared matrix
Examples
--------
>>> E = np.eye(3)
>>> librosa.util.shear(E, factor=-1, axis=-1)
array([[1., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]])
>>> librosa.util.shear(E, factor=-1, axis=0)
array([[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]])
>>> librosa.util.shear(E, factor=1, axis=-1)
array([[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]])
"""
if not np.issubdtype(type(factor), np.integer):
raise ParameterError("factor={} must be integer-valued".format(factor))
if scipy.sparse.isspmatrix(X):
return __shear_sparse(X, factor=factor, axis=axis)
else:
return __shear_dense(X, factor=factor, axis=axis)
def stack(arrays, axis=0):
"""Stack one or more arrays along a target axis.
This function is similar to `np.stack`, except that memory contiguity is
retained when stacking along the first dimension.
This is useful when combining multiple monophonic audio signals into a
multi-channel signal, or when stacking multiple feature representations
to form a multi-dimensional array.
Parameters
----------
arrays : list
one or more `np.ndarray`
axis : integer
The target axis along which to stack. ``axis=0`` creates a new first axis,
and ``axis=-1`` creates a new last axis.
Returns
-------
arr_stack : np.ndarray [shape=(len(arrays), array_shape) or shape=(array_shape, len(arrays))]
The input arrays, stacked along the target dimension.
If ``axis=0``, then ``arr_stack`` will be F-contiguous.
Otherwise, ``arr_stack`` will be C-contiguous by default, as computed by
`np.stack`.
Raises
------
ParameterError
- If ``arrays`` do not all have the same shape
- If no ``arrays`` are given
See Also
--------
numpy.stack
numpy.ndarray.flags
frame
Examples
--------
Combine two buffers into a contiguous array
>>> y_left = np.ones(5)
>>> y_right = -np.ones(5)
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=0)
>>> y_stereo
array([[ 1., 1., 1., 1., 1.],
[-1., -1., -1., -1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
Or along the trailing axis
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=-1)
>>> y_stereo
array([[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
"""
shapes = {arr.shape for arr in arrays}
if len(shapes) > 1:
raise ParameterError("all input arrays must have the same shape")
elif len(shapes) < 1:
raise ParameterError("at least one input array must be provided for stack")
shape_in = shapes.pop()
if axis != 0:
return np.stack(arrays, axis=axis)
else:
# If axis is 0, enforce F-ordering
shape = tuple([len(arrays)] + list(shape_in))
# Find the common dtype for all inputs
dtype = np.find_common_type([arr.dtype for arr in arrays], [])
# Allocate an empty array of the right shape and type
result = np.empty(shape, dtype=dtype, order="F")
# Stack into the preallocated buffer
np.stack(arrays, axis=axis, out=result)
return result
def dtype_r2c(d, default=np.complex64):
"""Find the complex numpy dtype corresponding to a real dtype.
This is used to maintain numerical precision and memory footprint
when constructing complex arrays from real-valued data
(e.g. in a Fourier transform).
A `float32` (single-precision) type maps to `complex64`,
while a `float64` (double-precision) maps to `complex128`.
Parameters
----------
d : np.dtype
The real-valued dtype to convert to complex.
If ``d`` is a complex type already, it will be returned.
default : np.dtype, optional
The default complex target type, if ``d`` does not match a
known dtype
Returns
-------
d_c : np.dtype
The complex dtype
See Also
--------
dtype_c2r
numpy.dtype
Examples
--------
>>> librosa.util.dtype_r2c(np.float32)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.int16)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.complex128)
dtype('complex128')
"""
mapping = {
np.dtype(np.float32): np.complex64,
np.dtype(np.float64): np.complex128,
np.dtype(np.float): np.complex,
}
# If we're given a complex type already, return it
dt = np.dtype(d)
if dt.kind == "c":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(dt, default))
def dtype_c2r(d, default=np.float32):
"""Find the real numpy dtype corresponding to a complex dtype.
This is used to maintain numerical precision and memory footprint
when constructing real arrays from complex-valued data
(e.g. in an inverse Fourier transform).
A `complex64` (single-precision) type maps to `float32`,
while a `complex128` (double-precision) maps to `float64`.
Parameters
----------
d : np.dtype
The complex-valued dtype to convert to real.
If ``d`` is a real (float) type already, it will be returned.
default : np.dtype, optional
The default real target type, if ``d`` does not match a
known dtype
Returns
-------
d_r : np.dtype
The real dtype
See Also
--------
dtype_r2c
numpy.dtype
Examples
--------
>>> librosa.util.dtype_c2r(np.complex64)
dtype('float32')
>>> librosa.util.dtype_c2r(np.float32)
dtype('float32')
>>> librosa.util.dtype_c2r(np.int16)
dtype('float32')
>>> librosa.util.dtype_c2r(np.complex128)
dtype('float64')
"""
mapping = {
np.dtype(np.complex64): np.float32,
np.dtype(np.complex128): np.float64,
np.dtype(np.complex): np.float,
}
# If we're given a real type already, return it
dt = np.dtype(d)
if dt.kind == "f":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(np.dtype(d), default))
| isc |
YeoLab/anchor | anchor/simulate.py | 1 | 7366 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .visualize import violinplot, MODALITY_ORDER, MODALITY_TO_COLOR, barplot
def add_noise(data, iteration_per_noise=100,
noise_percentages=np.arange(0, 101, step=10), plot=True,
violinplot_kws=None, figure_prefix='anchor_simulation'):
data_dfs = []
violinplot_kws = {} if violinplot_kws is None else violinplot_kws
width = len(data.columns) * 0.75
alpha = max(0.05, 1. / iteration_per_noise)
for noise_percentage in noise_percentages:
if plot:
fig, ax = plt.subplots(figsize=(width, 3))
for iteration in range(iteration_per_noise):
if iteration > 0 and noise_percentage == 0:
continue
noisy_data = data.copy()
shape = (noisy_data.shape[0] * noise_percentage // 100,
noisy_data.shape[1])
size = np.product(shape)
noise_ind = np.random.choice(noisy_data.index,
size=shape[0],
replace=False)
noisy_data.loc[noise_ind] = np.random.uniform(
low=0., high=1., size=size).reshape(shape)
renamer = dict(
(col, '{}_noise{}_iter{}'.format(
col, noise_percentage, iteration))
for col in noisy_data.columns)
renamed = noisy_data.rename(columns=renamer)
data_dfs.append(renamed)
if plot:
noisy_data_tidy = noisy_data.unstack()
noisy_data_tidy = noisy_data_tidy.reset_index()
noisy_data_tidy = noisy_data_tidy.rename(
columns={'level_0': 'Feature ID',
'level_1': 'Sample ID',
0: '$\Psi$'})
violinplot(x='Feature ID', y='$\Psi$',
data=noisy_data_tidy, ax=ax,
**violinplot_kws)
if plot:
if noise_percentage > 0:
for c in ax.collections:
c.set_alpha(alpha)
ax.set(ylim=(0, 1), title='{}% Uniform Noise'.format(
noise_percentage), yticks=(0, 0.5, 1), ylabel='$\Psi$',
xlabel='')
plt.setp(ax.get_xticklabels(), rotation=90)
sns.despine()
fig.tight_layout()
fig.savefig('{}_noise_percentage_{}.pdf'.format(figure_prefix,
noise_percentage))
all_noisy_data = pd.concat(data_dfs, axis=1)
return all_noisy_data
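# Usage sketch (illustrative only; the toy Psi matrix below is invented):
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> psi = pd.DataFrame(np.random.uniform(size=(100, 4)),
#   ...                    columns=list('ABCD'))
#   >>> noisy = add_noise(psi, iteration_per_noise=2,
#   ...                   noise_percentages=(0, 50), plot=False)
#   >>> noisy.shape[0] == psi.shape[0]   # rows preserved, columns expanded
#   True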
class ModalityEvaluator(object):
def __init__(self, estimator, data, waypoints, fitted, predicted):
self.estimator = estimator
self.data = data
self.predicted = predicted
self.fitted = fitted
self.waypoints = waypoints
def evaluate_estimator(estimator, data, waypoints=None, figure_prefix=''):
#
# estimator.violinplot(n=1e3)
# fig = plt.gcf()
# for ax in fig.axes:
# ax.set(yticks=[0, 0.5, 1], xlabel='')
# # xticklabels =
# # ax.set_xticklabels(fontsize=20)
# fig.tight_layout()
# sns.despine()
# fig.savefig('{}_modality_parameterization.pdf'.format(figure_prefix))
fitted = estimator.fit(data)
predicted = estimator.predict(fitted)
predicted.name = 'Predicted Modality'
fitted_tidy = fitted.stack().reset_index()
fitted_tidy = fitted_tidy.rename(
columns={'level_1': 'Feature ID', 'level_0': "Modality",
0: estimator.score_name}, copy=False)
predicted_tidy = predicted.to_frame().reset_index()
predicted_tidy = predicted_tidy.rename(columns={'index': 'Feature ID'})
predicted_tidy = predicted_tidy.merge(
fitted_tidy, left_on=['Feature ID', 'Predicted Modality'],
right_on=['Feature ID', 'Modality'])
# Make categorical so they are plotted in the correct order
predicted_tidy['Predicted Modality'] = \
pd.Categorical(predicted_tidy['Predicted Modality'],
categories=MODALITY_ORDER, ordered=True)
predicted_tidy['Modality'] = \
pd.Categorical(predicted_tidy['Modality'],
categories=MODALITY_ORDER, ordered=True)
grouped = data.groupby(predicted, axis=1)
size = 5
fig, axes = plt.subplots(figsize=(size*0.75, 8), nrows=len(grouped))
for ax, (modality, df) in zip(axes, grouped):
random_ids = np.random.choice(df.columns, replace=False, size=size)
random_df = df[random_ids]
tidy_random = random_df.stack().reset_index()
tidy_random = tidy_random.rename(columns={'level_0': 'sample_id',
'level_1': 'event_id',
0: '$\Psi$'})
sns.violinplot(x='event_id', y='$\Psi$', data=tidy_random,
color=MODALITY_TO_COLOR[modality], ax=ax,
inner=None, bw=0.2, scale='width')
ax.set(ylim=(0, 1), yticks=(0, 0.5, 1), xticks=[], xlabel='',
title=modality)
sns.despine()
fig.tight_layout()
fig.savefig('{}_random_estimated_modalities.pdf'.format(figure_prefix))
g = barplot(predicted_tidy, hue='Modality')
g.savefig('{}_modalities_barplot.pdf'.format(figure_prefix))
plot_best_worst_fits(predicted_tidy, data, modality_col='Modality',
score=estimator.score_name)
fig = plt.gcf()
fig.savefig('{}_best_worst_fit_violinplots.pdf'.format(figure_prefix))
fitted.to_csv('{}_fitted.csv'.format(figure_prefix))
predicted.to_csv('{}_predicted.csv'.format(figure_prefix))
result = ModalityEvaluator(estimator, data, waypoints, fitted, predicted)
return result
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
score='$\log_2 K$'):
"""Violinplots of the highest and lowest scoring of each modality"""
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout()
| bsd-3-clause |
spacetelescope/stsci.tools | doc/source/conf.py | 1 | 7012 | # -*- coding: utf-8 -*-
#
# stsci.tools documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 7 13:09:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from stsci.tools import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stsci.tools'
copyright = u'2020, STScI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_static_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = ['py-modindex']
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'stsci.toolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'stsci.tools.tex', u'stsci.tools Documentation',
# u'SSB', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/',
(None, 'http://data.astropy.org/intersphinx/matplotlib.inv')),
'astropy': ('https://docs.astropy.org/en/stable/', None)
}
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/preprocessing/_discretization.py | 5 | 13176 | # -*- coding: utf-8 -*-
# Author: Henry Lin <hlin117@gmail.com>
# Tom Dupré la Tour
# License: BSD
import numbers
import numpy as np
import warnings
from . import OneHotEncoder
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
class KBinsDiscretizer(TransformerMixin, BaseEstimator):
"""
Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
.. versionadded:: 0.20
Parameters
----------
n_bins : int or array-like of shape (n_features,), default=5
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'
Method used to encode the transformed result.
onehot
Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
onehot-dense
Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
ordinal
Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'
Strategy used to define the widths of the bins.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D k-means
cluster.
dtype : {np.float32, np.float64}, default=None
The desired data-type for the output. If None, output dtype is
consistent with input dtype. Only np.float32 and np.float64 are
supported.
.. versionadded:: 0.24
Attributes
----------
n_bins_ : ndarray of shape (n_features,), dtype=np.int_
        Number of bins per feature. Bins whose widths are too small
(i.e., <= 1e-8) are removed with a warning.
bin_edges_ : ndarray of ndarray of shape (n_features,)
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
See Also
--------
Binarizer : Class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).
Examples
--------
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X)
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
"""
@_deprecate_positional_args
def __init__(self, n_bins=5, *, encode='onehot', strategy='quantile',
dtype=None):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
self.dtype = dtype
def fit(self, X, y=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features), dtype={int, float}
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
Returns
-------
self
"""
X = self._validate_data(X, dtype='numeric')
supported_dtype = (np.float64, np.float32)
if self.dtype in supported_dtype:
output_dtype = self.dtype
elif self.dtype is None:
output_dtype = X.dtype
else:
raise ValueError(
f"Valid options for 'dtype' are "
f"{supported_dtype + (None,)}. Got dtype={self.dtype} "
f" instead."
)
valid_encode = ('onehot', 'onehot-dense', 'ordinal')
if self.encode not in valid_encode:
raise ValueError("Valid options for 'encode' are {}. "
"Got encode={!r} instead."
.format(valid_encode, self.encode))
valid_strategy = ('uniform', 'quantile', 'kmeans')
if self.strategy not in valid_strategy:
raise ValueError("Valid options for 'strategy' are {}. "
"Got strategy={!r} instead."
.format(valid_strategy, self.strategy))
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn("Feature %d is constant and will be "
"replaced with 0." % jj)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == 'uniform':
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == 'quantile':
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
elif self.strategy == 'kmeans':
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(column[:, None]).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ('quantile', 'kmeans'):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn('Bins whose width are too small (i.e., <= '
'1e-8) in feature %d are removed. Consider '
'decreasing the number of bins.' % jj)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if 'onehot' in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse=self.encode == 'onehot',
dtype=output_dtype)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature.
"""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, numbers.Integral):
raise ValueError("{} received an invalid n_bins type. "
"Received {}, expected int."
.format(KBinsDiscretizer.__name__,
type(orig_bins).__name__))
if orig_bins < 2:
raise ValueError("{} received an invalid number "
"of bins. Received {}, expected at least 2."
.format(KBinsDiscretizer.__name__, orig_bins))
return np.full(n_features, orig_bins, dtype=int)
n_bins = check_array(orig_bins, dtype=int, copy=True,
ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array "
"of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError("{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int."
.format(KBinsDiscretizer.__name__, indices))
return n_bins
def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : array-like of shape (n_samples, n_features), dtype={int, float}
Data to be discretized.
Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
Data in the binned space. Will be a sparse matrix if
`self.encode='onehot'` and ndarray otherwise.
"""
check_is_fitted(self)
# check input and attribute dtypes
dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False)
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation. See documentation of
# numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.e-5
atol = 1.e-8
eps = atol + rtol * np.abs(Xt[:, jj])
Xt[:, jj] = np.digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)
if self.encode == 'ordinal':
return Xt
dtype_init = None
if 'onehot' in self.encode:
dtype_init = self._encoder.dtype
self._encoder.dtype = Xt.dtype
try:
Xt_enc = self._encoder.transform(Xt)
finally:
# revert the initial dtype to avoid modifying self.
self._encoder.dtype = dtype_init
return Xt_enc
def inverse_transform(self, Xt):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : array-like of shape (n_samples, n_features), dtype={int, float}
Transformed data in the binned space.
Returns
-------
Xinv : ndarray, dtype={np.float32, np.float64}
Data in the original feature space.
"""
check_is_fitted(self)
if 'onehot' in self.encode:
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xinv.shape[1]))
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]
return Xinv
| bsd-3-clause |
ngcurrier/ProteusCFD | GUI/dakotaHistogram.py | 1 | 1543 | #!/usr/bin/python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#reads a space delimited file with a header and returns a dictionary
#attempts to cast dictionary entries into floats, if it fails, leaves as is
def readSpaceDelimitedFile(filename):
f = open(filename, 'r')
headers = f.readline().split()
dict = {}
for header in headers:
dict[header] = []
for line in f:
items = line.split()
i = 0
for header in headers:
try:
dict[header].append(float(items[i]))
except:
dict[header].append(items[i])
i = i + 1
f.close()
return dict
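# Illustrative sketch of the expected input (hypothetical dakota_tabular.dat;
# the column names and values below are invented for the example):
#
#   %eval_id interface x1 response_fn_1
#   1 NO_ID 0.25 1.75
#   2 NO_ID 0.50 1.50
#
# readSpaceDelimitedFile('dakota_tabular.dat') would then return
# {'%eval_id': [1.0, 2.0], 'interface': ['NO_ID', 'NO_ID'],
#  'x1': [0.25, 0.5], 'response_fn_1': [1.75, 1.5]}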
#plots a histogram of data, computes basic stats, and labels chart
def plotHistogram(data, seriesName):
# the histogram of the data
n, bins, patches = plt.hist(data, 50, normed=1, facecolor='green', alpha=0.75)
mu = np.mean(data)
sigma = np.std(data)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel(seriesName)
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ ' + seriesName + ':}\ \mu=' + str(mu) +',\ \sigma=' + str(sigma) +'$')
plt.grid(True)
plt.show()
if __name__ == '__main__':
data = readSpaceDelimitedFile('dakota_tabular.dat')
    print(data)
for idata in data:
if idata != 'interface' and idata != '%eval_id':
plotHistogram(data[idata], idata)
| gpl-3.0 |
jmschrei/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 9 | 5718 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214:
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
Diyago/Machine-Learning-scripts | DEEP LEARNING/segmentation/Severstal-Steel-Defect-Detection-master/common_blocks/new_metrics.py | 1 | 15642 | from functools import partial
import numpy as np
import torch
from catalyst.dl import Callback, RunnerState, MetricCallback, CallbackOrder
from pytorch_toolbelt.utils.catalyst.visualization import get_tensorboard_logger
from pytorch_toolbelt.utils.torch_utils import to_numpy
from pytorch_toolbelt.utils.visualization import (
render_figure_to_tensor,
plot_confusion_matrix,
)
from sklearn.metrics import f1_score, multilabel_confusion_matrix
__all__ = [
"pixel_accuracy",
"binary_dice_iou_score",
"multiclass_dice_iou_score",
"multilabel_dice_iou_score",
"PixelAccuracyCallback",
"MacroF1Callback",
"ConfusionMatrixCallback",
"IoUMetricsCallback",
]
BINARY_MODE = "binary"
MULTICLASS_MODE = "multiclass"
MULTILABEL_MODE = "multilabel"
def pixel_accuracy(outputs: torch.Tensor, targets: torch.Tensor, ignore_index=None):
"""
Compute the pixel accuracy
"""
outputs = outputs.detach()
targets = targets.detach()
if ignore_index is not None:
mask = targets != ignore_index
outputs = outputs[mask]
targets = targets[mask]
outputs = (outputs > 0).float()
correct = float(torch.sum(outputs == targets))
total = targets.numel()
return correct / total
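# Worked toy example (tensor values invented for illustration): logits above
# zero count as positive predictions, so
#
#   >>> logits = torch.tensor([0.7, -1.2, 2.3, -0.5])
#   >>> target = torch.tensor([1.0, 0.0, 0.0, 0.0])
#   >>> pixel_accuracy(logits, target)   # 3 of the 4 pixels agree
#   0.75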
class PixelAccuracyCallback(MetricCallback):
"""Pixel accuracy metric callback
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "accuracy",
ignore_index=None,
):
"""
:param input_key: input key to use for iou calculation;
specifies our `y_true`.
:param output_key: output key to use for iou calculation;
specifies our `y_pred`
:param ignore_index: same meaning as in nn.CrossEntropyLoss
"""
super().__init__(
prefix=prefix,
metric_fn=partial(pixel_accuracy, ignore_index=ignore_index),
input_key=input_key,
output_key=output_key,
)
class ConfusionMatrixCallback(Callback):
"""
Compute and log confusion matrix to Tensorboard.
For use with Multiclass classification/segmentation.
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "confusion_matrix",
class_names=None,
ignore_index=None,
):
"""
:param input_key: input key to use for precision calculation;
specifies our `y_true`.
:param output_key: output key to use for precision calculation;
specifies our `y_pred`.
:param ignore_index: same meaning as in nn.CrossEntropyLoss
"""
super().__init__(CallbackOrder.Logger)
self.prefix = prefix
self.class_names = class_names
self.output_key = output_key
self.input_key = input_key
self.outputs = []
self.targets = []
self.ignore_index = ignore_index
def on_loader_start(self, state):
self.outputs = []
self.targets = []
def on_batch_end(self, state: RunnerState):
outputs = to_numpy(torch.sigmoid(state.output[self.output_key]))
targets = to_numpy(state.input[self.input_key])
# outputs = 1*(outputs>0.5)
# targets = np.argmax(targets, axis=1)
if self.ignore_index is not None:
mask = targets != self.ignore_index
outputs = outputs[mask]
targets = targets[mask]
self.outputs.extend(outputs)
self.targets.extend(targets)
def on_loader_end(self, state):
targets = np.array(self.targets)
outputs = np.array(self.outputs)
if self.class_names is None:
class_names = [str(i) for i in range(targets.shape[1])]
else:
class_names = self.class_names
num_classes = len(class_names)
best_score = 0
best_th = 0
best_fsores = {c: 0 for c in range(num_classes)}
best_fsores_th = {}
for th in np.linspace(0, 1, 41):
cm = multilabel_confusion_matrix(
targets, outputs > th, labels=range(num_classes)
)
for c in range(num_classes):
tn, fp, fn, tp = cm[c].ravel()
if (tp + fp) == 0:
precision = 0
else:
precision = tp / (tp + fp)
if (tp + fn) == 0:
recall = 0
else:
recall = tp / (tp + fn)
if precision == 0 or recall == 0:
fscore = 0
else:
fscore = 2 * (precision * recall / (precision + recall))
if best_fsores[c] < fscore:
best_fsores_th[c] = th
state.metrics.epoch_values[state.loader_name][
str(c) + "_precision_best"
] = precision
state.metrics.epoch_values[state.loader_name][
str(c) + "_recall_best"
] = recall
state.metrics.epoch_values[state.loader_name][
str(c) + "_fscore_best"
] = fscore
state.metrics.epoch_values[state.loader_name][
str(c) + "_fscore_best_th"
] = th
best_fsores[c] = fscore
state.metrics.epoch_values[state.loader_name]["fscore_macro_best"] = np.mean(
[best_fsores[i] for i in best_fsores]
)
cm = multilabel_confusion_matrix(
targets, outputs > 0.5, labels=range(num_classes)
)
for c in range(num_classes):
tn, fp, fn, tp = cm[c].ravel()
if (tp + fp) == 0:
precision = 0
else:
precision = tp / (tp + fp)
if (tp + fn) == 0:
recall = 0
else:
recall = tp / (tp + fn)
if precision == 0 or recall == 0:
fscore = 0
else:
fscore = 2 * (precision * recall / (precision + recall))
state.metrics.epoch_values[state.loader_name][
str(c) + "_precision_05"
] = precision
state.metrics.epoch_values[state.loader_name][
str(c) + "_recall_05"
] = recall
state.metrics.epoch_values[state.loader_name][
str(c) + "_fscore_05"
] = fscore
# logger = get_tensorboard_logger(state)
# logger.add_image(f"{self.prefix}/epoch", fig, global_step=state.step)
class MacroF1Callback(Callback):
"""
Compute F1-macro metric
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "macro_f1",
ignore_index=None,
):
"""
:param input_key: input key to use for precision calculation;
specifies our `y_true`.
:param output_key: output key to use for precision calculation;
specifies our `y_pred`.
"""
super().__init__(CallbackOrder.Metric)
self.metric_fn = lambda outputs, targets: f1_score(
targets, outputs, average="macro"
)
self.prefix = prefix
self.output_key = output_key
self.input_key = input_key
self.outputs = []
self.targets = []
self.ignore_index = ignore_index
def on_batch_end(self, state: RunnerState):
outputs = to_numpy(torch.sigmoid(state.output[self.output_key]))
targets = to_numpy(state.input[self.input_key])
num_classes = outputs.shape[1]
outputs = 1 * (outputs > 0.5)
# targets = np.argmax(targets, axis=1)
if self.ignore_index is not None:
mask = targets != self.ignore_index
outputs = outputs[mask]
targets = targets[mask]
# outputs = [np.eye(num_classes)[y] for y in outputs]
# targets = [np.eye(num_classes)[y] for y in targets]
self.outputs.extend(outputs)
self.targets.extend(targets)
# metric = self.metric_fn(self.targets, self.outputs)
# state.metrics.add_batch_value(name=self.prefix, value=metric)
def on_loader_start(self, state):
self.outputs = []
self.targets = []
def on_loader_end(self, state):
metric_name = self.prefix
targets = np.array(self.targets)
outputs = np.array(self.outputs)
metric = self.metric_fn(outputs, targets)
state.metrics.epoch_values[state.loader_name][metric_name] = metric
def binary_dice_iou_score(
y_pred: torch.Tensor,
y_true: torch.Tensor,
mode="dice",
threshold=None,
nan_score_on_empty=False,
eps=1e-7,
) -> float:
"""
Compute IoU score between two image tensors
:param y_pred: Input image tensor of any shape
:param y_true: Target image of any shape (must match size of y_pred)
:param mode: Metric to compute (dice, iou)
:param threshold: Optional binarization threshold to apply on @y_pred
:param nan_score_on_empty: If true, return np.nan if target has no positive pixels;
If false, return 1. if both target and input are empty, and 0 otherwise.
:param eps: Small value to add to denominator for numerical stability
:return: Float scalar
"""
assert mode in {"dice", "iou"}
# Binarize predictions
if threshold is not None:
y_pred = (y_pred > threshold).to(y_true.dtype)
intersection = torch.sum(y_pred * y_true).item()
cardinality = (torch.sum(y_pred) + torch.sum(y_true)).item()
if mode == "dice":
score = (2.0 * intersection) / (cardinality + eps)
else:
score = intersection / (cardinality + eps)
has_targets = torch.sum(y_true) > 0
has_predicted = torch.sum(y_pred) > 0
if not has_targets:
if nan_score_on_empty:
score = np.nan
else:
score = float(not has_predicted)
return score
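# Worked toy example (tensor values invented for illustration). With one
# overlapping positive pixel, two predicted positives and one true positive:
#
#   >>> y_pred = torch.tensor([1.0, 1.0, 0.0, 0.0])
#   >>> y_true = torch.tensor([1.0, 0.0, 0.0, 0.0])
#   >>> binary_dice_iou_score(y_pred, y_true, mode="dice")  # 2*1 / (2+1)
#   0.666...
#   >>> binary_dice_iou_score(y_pred, y_true, mode="iou")   # 1 / (2+1), as coded above
#   0.333...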
def multiclass_dice_iou_score(
y_pred: torch.Tensor,
y_true: torch.Tensor,
mode="dice",
threshold=None,
eps=1e-7,
nan_score_on_empty=False,
classes_of_interest=None,
):
ious = []
num_classes = y_pred.size(0)
y_pred = y_pred.argmax(dim=0)
if classes_of_interest is None:
classes_of_interest = range(num_classes)
for class_index in classes_of_interest:
iou = binary_dice_iou_score(
y_pred=(y_pred == class_index).float(),
y_true=(y_true == class_index).float(),
mode=mode,
nan_score_on_empty=nan_score_on_empty,
threshold=threshold,
eps=eps,
)
ious.append(iou)
return ious
def multilabel_dice_iou_score(
y_true: torch.Tensor,
y_pred: torch.Tensor,
mode="dice",
threshold=None,
eps=1e-7,
nan_score_on_empty=False,
classes_of_interest=None,
):
ious = []
num_classes = y_pred.size(0)
if classes_of_interest is None:
classes_of_interest = range(num_classes)
for class_index in classes_of_interest:
iou = binary_dice_iou_score(
y_pred=y_pred[class_index],
y_true=y_true[class_index],
mode=mode,
threshold=threshold,
nan_score_on_empty=nan_score_on_empty,
eps=eps,
)
ious.append(iou)
return ious
class IoUMetricsCallback(Callback):
"""
A metric callback for computing either Dice or Jaccard metric
which is computed across whole epoch, not per-batch.
"""
def __init__(
self,
mode: str,
metric="dice",
class_names=None,
classes_of_interest=None,
input_key: str = "targets",
output_key: str = "logits",
nan_score_on_empty=True,
prefix: str = None,
):
"""
:param mode: One of: 'binary', 'multiclass', 'multilabel'.
:param input_key: input key to use for precision calculation; specifies our `y_true`.
:param output_key: output key to use for precision calculation; specifies our `y_pred`.
        :param nan_score_on_empty: if True, samples with no positive target
            pixels score NaN instead of 0/1 (see ``binary_dice_iou_score``).
"""
super().__init__(CallbackOrder.Metric)
assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
if prefix is None:
prefix = metric
if classes_of_interest is not None:
if classes_of_interest.dtype == np.bool:
num_classes = len(classes_of_interest)
classes_of_interest = np.arange(num_classes)[classes_of_interest]
if class_names is not None:
if len(class_names) != len(classes_of_interest):
raise ValueError(
"Length of 'classes_of_interest' must be equal to length of 'classes_of_interest'"
)
self.mode = mode
self.prefix = prefix
self.output_key = output_key
self.input_key = input_key
self.class_names = class_names
self.classes_of_interest = classes_of_interest
self.scores = []
if self.mode == BINARY_MODE:
self.score_fn = partial(
binary_dice_iou_score,
threshold=0.0,
nan_score_on_empty=nan_score_on_empty,
mode=metric,
)
if self.mode == MULTICLASS_MODE:
self.score_fn = partial(
multiclass_dice_iou_score,
mode=metric,
threshold=0.0,
nan_score_on_empty=nan_score_on_empty,
classes_of_interest=self.classes_of_interest,
)
if self.mode == MULTILABEL_MODE:
self.score_fn = partial(
multilabel_dice_iou_score,
mode=metric,
threshold=0.5,
nan_score_on_empty=nan_score_on_empty,
classes_of_interest=self.classes_of_interest,
)
def on_loader_start(self, state):
self.scores = []
@torch.no_grad()
def on_batch_end(self, state: RunnerState):
outputs = state.output[self.output_key].detach()
targets = state.input[self.input_key].detach()
batch_size = targets.size(0)
score_per_image = []
for image_index in range(batch_size):
score_per_class = self.score_fn(
y_pred=outputs[image_index], y_true=targets[image_index]
)
score_per_image.append(score_per_class)
mean_score = np.nanmean(score_per_image)
state.metrics.add_batch_value(self.prefix, float(mean_score))
self.scores.extend(score_per_image)
def on_loader_end(self, state):
scores = np.array(self.scores)
mean_score = np.nanmean(scores)
state.metrics.epoch_values[state.loader_name][self.prefix] = float(mean_score)
# Log additional IoU scores per class
if self.mode in {MULTICLASS_MODE, MULTILABEL_MODE}:
num_classes = scores.shape[1]
class_names = self.class_names
if class_names is None:
class_names = [f"class_{i}" for i in range(num_classes)]
scores_per_class = np.nanmean(scores, axis=0)
for class_name, score_per_class in zip(class_names, scores_per_class):
state.metrics.epoch_values[state.loader_name][
self.prefix + "_" + class_name
] = float(score_per_class)
| apache-2.0 |
mrshu/scikit-learn | sklearn/covariance/__init__.py | 10 | 1197 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope, EllipticEnvelop
__all__ = ['EllipticEnvelop',
'EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
ThorbenJensen/wifi-locator | src/utils_classification.py | 1 | 2437 | """Module provides classification of signals and evaluates models."""
from random import randrange
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# classification models
classifiers = {'K-Nearest Neighbors (Braycurtis norm)':
KNeighborsClassifier(n_neighbors=3, algorithm='auto',
metric='braycurtis'),
'Random Forest':
RandomForestClassifier(n_estimators=80, n_jobs=1),
'SVM': SVC(gamma=2, C=1),
'Linear Support Vector Machine': SVC(kernel="linear", C=0.025),
'Decision Tree': DecisionTreeClassifier(max_depth=5),
'Ada Boost': AdaBoostClassifier(n_estimators=80,
learning_rate=0.4),
'Naive Bayes': GaussianNB(),
}
vc = VotingClassifier(estimators=list(classifiers.items()), voting='hard')
def evaluate_model(model_name, model, x, y):
"""Evaluate model accuracy via cross validation."""
print('%s:' % model_name)
model.fit(x, y.values.ravel())
print('CV f1_micro (not reusing data): %s' % np.mean(cross_val_score(model,
x, y.values.ravel(), cv=5, scoring='f1_micro')))
def predict(x, y, signal_matrix, verbose=1):
"""Predict current location, based on hard voting among classifiers."""
# TODO: classify based on *balanced* sample (repeated sampling strategy)
# report for models within VotingClassifier
for key in classifiers.keys():
model = classifiers[key]
model.fit(x, y.values.ravel())
location = model.predict(signal_matrix)[0]
if verbose > 0:
print('Model "%s": %s' % (key, location))
# report for VotingClassifier
vc.fit(x, y.values.ravel())
vc_locations = vc.predict(signal_matrix)
# in case VotingClassifier returns more than one result: draw random
rand_index = randrange(0, len(vc_locations))
vc_location = vc_locations[rand_index]
if verbose > 0:
print('VotingClassifier result: %s' % vc_location)
return vc_location
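# Minimal usage sketch (synthetic data; the access-point columns and location
# labels below are invented for illustration only):
if __name__ == '__main__':
    import pandas as pd
    x_demo = pd.DataFrame({'ap_1': [-40, -42, -44, -80, -78, -82],
                           'ap_2': [-70, -72, -68, -30, -32, -28]})
    y_demo = pd.DataFrame({'location': ['desk', 'desk', 'desk',
                                        'kitchen', 'kitchen', 'kitchen']})
    current_signal = pd.DataFrame({'ap_1': [-41], 'ap_2': [-69]})
    print(predict(x_demo, y_demo, current_signal, verbose=0))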
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/series.py | 1 | 89595 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import types
import warnings
from numpy import nan, ndarray
import numpy as np
import numpy.ma as ma
from pandas.core.common import (isnull, notnull, is_bool_indexer,
_default_index, _maybe_upcast,
_asarray_tuplesafe, _infer_dtype_from_scalar,
is_list_like, _values_from_object,
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform, _try_sort,
ABCSparseArray, _maybe_match_name, _coerce_to_dtype,
_ensure_object, SettingWithCopyError,
_maybe_box_datetimelike, ABCDataFrame)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical, CategoricalAccessor
from pandas.tseries.common import (maybe_to_datetimelike,
CombinedDatetimelikeProperties)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex, Period
from pandas import compat
from pandas.util.terminal import get_terminal_size
from pandas.compat import zip, u, OrderedDict, StringIO
import pandas.core.ops as ops
from pandas.core.algorithms import select_n
import pandas.core.common as com
import pandas.core.datetools as datetools
import pandas.core.format as fmt
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, cache_readonly
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from numpy import percentile as _quantile
from pandas.core.config import get_option
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index',
klass='Series',
axes_single_arg="{0, 'index'}",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
duplicated='Series'
)
def _coerce_method(converter):
""" install the scalar coercion methods """
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError(
"cannot convert the series to {0}".format(str(converter)))
return wrapper
#----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be any hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN)
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
Values must be unique and hashable, same length as data. Index
object (or other iterable of same length as data) Will default to
np.arange(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
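    Examples
    --------
    A brief illustrative sketch (toy values) of the index alignment described
    above; labels present in only one operand produce NaN in the result:
    >>> s1 = Series([1, 2], index=['a', 'b'])
    >>> s2 = Series([10, 20], index=['b', 'c'])
    >>> s1 + s2  # doctest: +SKIP
    a   NaN
    b    12
    c   NaN
    dtype: float64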
"""
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_allow_index_ops = True
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = _ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError("initializing a Series from a "
"MultiIndex is not supported")
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
name = data.name
data = data._to_embed(keep_tz=True)
copy = True
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, Series):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
if index is None:
if isinstance(data, OrderedDict):
index = Index(data)
else:
index = Index(_try_sort(data))
try:
if isinstance(index, DatetimeIndex):
# coerce back to datetime objects for lookup
data = lib.fast_multiget(data, index.astype('O'),
default=np.nan)
elif isinstance(index, PeriodIndex):
data = [data.get(i, nan) for i in index]
else:
data = lib.fast_multiget(data, index.values,
default=np.nan)
except TypeError:
data = [data.get(i, nan) for i in index]
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
elif isinstance(data, Categorical):
if dtype is not None:
raise ValueError("cannot specify a dtype with a Categorical")
if name is None:
name = data.name
elif (isinstance(data, types.GeneratorType) or
(compat.PY3 and isinstance(data, map))):
data = list(data)
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered"
"".format(data.__class__.__name__))
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = _default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, raise_on_error=False)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy,
raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
object.__setattr__(self, 'name', name)
self._set_axis(0, index, fastpath=True)
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
# return a sparse series here
if isinstance(arr, ABCSparseArray):
from pandas.sparse.series import SparseSeries
cls = SparseSeries
return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._data._can_hold_na
@property
def is_time_series(self):
return self._subtyp in ['time_series', 'sparse_time_series']
_index = None
def _set_axis(self, axis, labels, fastpath=False):
""" override generic, we want to set the _typ here """
if not fastpath:
labels = _ensure_index(labels)
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
labels = DatetimeIndex(labels)
        # need to set here because we changed the index
if fastpath:
self._data.set_axis(axis, labels)
self._set_subtyp(is_all_dates)
object.__setattr__(self, '_index', labels)
if not fastpath:
self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
# ndarray compatibility
@property
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def dtypes(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def ftype(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def ftypes(self):
""" return if the data is sparse|dense """
return self._data.ftype
@property
def values(self):
"""
Return Series as ndarray
Returns
-------
arr : numpy.ndarray
"""
return self._data.values
def get_values(self):
""" same as values (but handles sparseness conversions); is a view """
return self._data.get_values()
# ops
def ravel(self, order='C'):
"""
Return the flattened underlying data as an ndarray
See also
--------
numpy.ndarray.ravel
"""
return self.values.ravel(order=order)
def compress(self, condition, axis=0, out=None, **kwargs):
"""
Return selected slices of an array along given axis as a Series
See also
--------
numpy.ndarray.compress
"""
return self[condition]
def nonzero(self):
"""
Return the indices of the elements that are non-zero
This method is equivalent to calling `numpy.nonzero` on the
        series data. For compatibility with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
See Also
--------
numpy.nonzero
"""
return self.values.nonzero()
def put(self, *args, **kwargs):
"""
return a ndarray with the values put
See also
--------
numpy.ndarray.put
"""
self.values.put(*args, **kwargs)
def __len__(self):
"""
return the length of the Series
"""
return len(self._data)
def view(self, dtype=None):
return self._constructor(self.values.view(dtype),
index=self.index).__finalize__(self)
def __array__(self, result=None):
"""
the array interface, return my values
"""
return self.get_values()
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._constructor(result, index=self.index,
copy=False).__finalize__(self)
def __array_prepare__(self, result, context=None):
"""
Gets called prior to a ufunc
"""
# nice error message for non-ufunc types
if context is not None and not isinstance(self.values, np.ndarray):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(obj=type(obj).__name__,
dtype=getattr(obj,'dtype',None),
op=context[0].__name__))
return result
# complex
@property
def real(self):
return self.values.real
@real.setter
def real(self, v):
self.values.real = v
@property
def imag(self):
return self.values.imag
@imag.setter
def imag(self, v):
self.values.imag = v
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
# we are preserving name here
def __getstate__(self):
return dict(_data=self._data, name=self.name)
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state['_data']
self.name = state['name']
self.index = self._data.index
elif isinstance(state, tuple):
# < 0.12 series pickle
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
name = own_state[1]
# recreate
self._data = SingleBlockManager(data, index, fastpath=True)
self._index = index
self.name = name
else:
raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
@property
def axes(self):
return [self.index]
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the Series by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
try:
# dispatch to the values if we need
values = self.values
if isinstance(values, np.ndarray):
return _index.get_value_at(values, i)
else:
return values[i]
except IndexError:
raise
except:
if isinstance(i, slice):
indexer = self.index._convert_slice_indexer(i, kind='iloc')
return self._get_values(indexer)
else:
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return _index.get_value_at(self, i)
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj, kind=kind or 'getitem')
return self._get_values(slobj)
def __getitem__(self, key):
try:
result = self.index.get_value(self, key)
if not np.isscalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if we have a non-unique index here
# otherwise have inline ndarray/lists
if not self.index.is_unique:
result = self._constructor(result,
index=[key]*len(result)
,dtype=self.dtype).__finalize__(self)
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key,kind='getitem')
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
if is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError('Indexing a Series with DataFrame is not supported, '\
'use the appropriate DataFrame column')
else:
if isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
return self.reindex(key)
else:
return self._get_values(key)
elif key_type == 'boolean':
return self._get_values(key)
else:
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.ix[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
# mpl hackaround
if any(k is None for k in key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError('Can only tuple-index with a MultiIndex')
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self.values[indexer],
index=new_index).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self.values[indexer]
def __setitem__(self, key, value):
def setitem(key, value):
try:
self._set_with_engine(key, value)
return
except (SettingWithCopyError):
raise
except (KeyError, ValueError):
values = self.values
if (com.is_integer(key)
and not self.index.inferred_type == 'integer'):
values[key] = value
return
elif key is Ellipsis:
self[:] = value
return
elif is_bool_indexer(key):
pass
elif com.is_timedelta64_dtype(self.dtype):
# reassign a null value to iNaT
if isnull(value):
value = tslib.iNaT
try:
self.index._engine.set_value(self.values, key, value)
return
except (TypeError):
pass
self.loc[key] = value
return
except TypeError as e:
if isinstance(key, tuple) and not isinstance(self.index,
MultiIndex):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
if 'unorderable' in str(e): # pragma: no cover
raise IndexError(key)
if is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
try:
self.where(~key, value, inplace=True)
return
except (InvalidIndexError):
pass
self._set_with(key, value)
# do the setitem
cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self.values
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
            if not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except:
key = [ key ]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.inferred_type == 'integer':
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == 'boolean':
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = _asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index'
% str(key[mask]))
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key.values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def repeat(self, reps):
"""
return a new Series with the values repeated reps times
See also
--------
numpy.ndarray.repeat
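        Examples
        --------
        A small, self-contained illustration (the values and the default
        integer index are arbitrary):
        >>> import pandas as pd
        >>> pd.Series([1, 2]).repeat(2).tolist()
        [1, 1, 2, 2]
        >>> pd.Series([1, 2]).repeat(2).index.tolist()
        [0, 0, 1, 1]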
"""
new_index = self.index.repeat(reps)
new_values = self.values.repeat(reps)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def reshape(self, *args, **kwargs):
"""
return an ndarray with the values shape
if the specified shape matches exactly the current shape, then
return self (for compat)
See also
--------
numpy.ndarray.take
"""
if len(args) == 1 and hasattr(args[0], '__iter__'):
shape = args[0]
else:
shape = args
if tuple(shape) == self.shape:
# XXX ignoring the "order" keyword.
return self
return self.values.reshape(shape, **kwargs)
iget_value = _ixs
iget = _ixs
irow = _ixs
def get_value(self, label, takeable=False):
"""
Quickly retrieve single value at passed index label
Parameters
----------
        label : object
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
if takeable is True:
return _maybe_box_datetimelike(self.values[label])
return self.index.get_value(self.values, label)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
series : Series
If label is contained, will be reference to calling Series,
otherwise a new object
"""
try:
if takeable:
self.values[label] = value
else:
self.index._engine.set_value(self.values, label, value)
return self
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return self
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Analogous to the :meth:`pandas.DataFrame.reset_index` function, see
docstring there.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns
name : object, default None
The name of the column corresponding to the Series values
inplace : boolean, default False
Modify the Series in place (do not create a new object)
Returns
----------
        reset : DataFrame, or Series if drop == True
"""
if drop:
new_index = np.arange(len(self))
if level is not None and isinstance(self.index, MultiIndex):
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(self.values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
'to create a DataFrame')
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
width, height = get_terminal_size()
max_rows = (height if get_option("display.max_rows") == 0
else get_option("display.max_rows"))
self.to_string(buf=buf, name=self.name, dtype=self.dtype,
max_rows=max_rows)
result = buf.getvalue()
return result
def _repr_footer(self):
namestr = u("Name: %s, ") % com.pprint_thing(
self.name) if self.name is not None else ""
# time series
if self.is_time_series:
if self.index.freq is not None:
freqstr = u('Freq: %s, ') % self.index.freqstr
else:
freqstr = u('')
return u('%s%sLength: %d') % (freqstr, namestr, len(self))
# Categorical
if com.is_categorical_dtype(self.dtype):
level_info = self.values._repr_categories_info()
return u('%sLength: %d, dtype: %s\n%s') % (namestr,
len(self),
str(self.dtype.name),
level_info)
# reg series
return u('%sLength: %d, dtype: %s') % (namestr,
len(self),
str(self.dtype.name))
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
length=False, dtype=False, name=False, max_rows=None):
"""
Render a string representation of the Series
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
            formatter function to apply to columns' elements if they are
            floats; default None
header: boolean, default True
Add the Series header (index name)
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
"""
the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,
header=header, length=length, dtype=dtype,
name=name, max_rows=max_rows)
# catch contract violations
if not isinstance(the_repr, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(the_repr.__class__.__name__))
if buf is None:
return the_repr
else:
try:
buf.write(the_repr)
except AttributeError:
with open(buf, 'w') as f:
f.write(the_repr)
def _get_repr(
self, name=False, header=True, length=True, dtype=True, na_rep='NaN',
float_format=None, max_rows=None):
"""
Internal function, should always return unicode string
"""
formatter = fmt.SeriesFormatter(self, name=name,
length=length, header=header,
dtype=dtype,
na_rep=na_rep,
float_format=float_format,
max_rows=max_rows)
result = formatter.to_string()
# TODO: following check prob. not neces.
if not isinstance(result, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__))
return result
def __iter__(self):
if com.is_categorical_dtype(self.dtype):
return iter(self.values)
elif np.issubdtype(self.dtype, np.datetime64):
return (lib.Timestamp(x) for x in self.values)
elif np.issubdtype(self.dtype, np.timedelta64):
return (lib.Timedelta(x) for x in self.values)
else:
return iter(self.values)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return zip(iter(self.index), iter(self))
if compat.PY3: # pragma: no cover
items = iteritems
#----------------------------------------------------------------------
# Misc public methods
def keys(self):
"Alias for index"
return self.index
def tolist(self):
""" Convert Series to a nested list """
return list(self)
def to_dict(self):
"""
Convert Series to {label -> value} dict
Returns
-------
value_dict : dict
"""
return dict(compat.iteritems(self))
def to_frame(self, name=None):
"""
Convert Series to DataFrame
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
data_frame : DataFrame
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind,
fill_value=fill_value).__finalize__(self)
#----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
if level is not None:
mask = notnull(self.values)
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
level_index = self.index.levels[level]
if len(self) == 0:
return self._constructor(0, index=level_index)\
.__finalize__(self)
# call cython function
max_bin = len(level_index)
labels = com._ensure_int64(self.index.labels[level])
counts = lib.count_level_1d(mask.view(np.uint8),
labels, max_bin)
return self._constructor(counts,
index=level_index).__finalize__(self)
return notnull(_values_from_object(self)).sum()
def mode(self):
"""Returns the mode(s) of the dataset.
Empty if nothing occurs at least 2 times. Always returns Series even
if only one value.
Returns
-------
modes : Series (sorted)
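        Examples
        --------
        Illustrative only; the values are arbitrary:
        >>> import pandas as pd
        >>> pd.Series([1, 2, 2, 3]).mode().tolist()
        [2]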
"""
# TODO: Add option for bins like value_counts()
from pandas.core.algorithms import mode
return mode(self)
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
def drop_duplicates(self, take_last=False, inplace=False):
return super(Series, self).drop_duplicates(take_last=take_last,
inplace=inplace)
@Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
def duplicated(self, take_last=False):
return super(Series, self).duplicated(take_last=take_last)
def idxmin(self, axis=None, out=None, skipna=True):
"""
Index of first occurrence of minimum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmin : Index of minimum of values
Notes
-----
This method is the Series version of ``ndarray.argmin``.
See Also
--------
DataFrame.idxmin
numpy.ndarray.argmin
"""
i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=None, out=None, skipna=True):
"""
Index of first occurrence of maximum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmax : Index of maximum of values
Notes
-----
This method is the Series version of ``ndarray.argmax``.
See Also
--------
DataFrame.idxmax
numpy.ndarray.argmax
"""
i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = idxmin
argmax = idxmax
@Appender(np.ndarray.round.__doc__)
def round(self, decimals=0, out=None):
"""
"""
result = _values_from_object(self).round(decimals, out=out)
if out is None:
result = self._constructor(result,
index=self.index).__finalize__(self)
return result
def quantile(self, q=0.5):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
Returns
-------
quantile : float or Series
if ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles.
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
valid = self.dropna()
def multi(values, qs):
if com.is_list_like(qs):
return Series([_quantile(values, x*100)
for x in qs], index=qs)
else:
return _quantile(values, qs*100)
return self._maybe_box(lambda values: multi(values, q), dropna=True)
def ptp(self, axis=None, out=None):
return _values_from_object(self).ptp(axis, out)
def corr(self, other, method='pearson',
min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values
Parameters
----------
other : Series
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
correlation : float
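        Examples
        --------
        A minimal illustration with two perfectly correlated Series
        (arbitrary values):
        >>> import pandas as pd
        >>> pd.Series([1, 2, 3]).corr(pd.Series([4, 5, 6]))
        1.0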
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancorr(this.values, other.values, method=method,
min_periods=min_periods)
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values
Parameters
----------
other : Series
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
covariance : float
Normalized by N-1 (unbiased estimator).
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values,
min_periods=min_periods)
def diff(self, periods=1):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
Returns
-------
diffed : Series
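        Examples
        --------
        Illustrative only; the first element has no predecessor and therefore
        becomes NaN:
        >>> import pandas as pd
        >>> pd.Series([1, 3, 6]).diff().tolist()
        [nan, 2.0, 3.0]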
"""
result = com.diff(_values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
"""
Lag-N autocorrelation
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
autocorr : float
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Matrix multiplication with DataFrame or inner-product with Series
objects
Parameters
----------
other : Series or DataFrame
Returns
-------
dot_product : scalar or Series
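        Examples
        --------
        Inner product of two aligned Series (arbitrary values):
        >>> import pandas as pd
        >>> pd.Series([1, 2, 3]).dot(pd.Series([4, 5, 6]))
        32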
"""
from pandas.core.frame import DataFrame
if isinstance(other, (Series, DataFrame)):
common = self.index.union(other.index)
if (len(common) > len(self.index) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals),
index=other.columns).__finalize__(self)
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
def searchsorted(self, v, side='left', sorter=None):
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted Series `self` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `self` would be preserved.
Parameters
----------
v : array_like
            Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
            index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
Series.sort
Series.order
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x.searchsorted([1, 2], side='right', sorter=[0, 2, 1])
array([1, 3])
"""
if sorter is not None:
sorter = com._ensure_platform_int(sorter)
return self.values.searchsorted(Series(v).values, side=side,
sorter=sorter)
#------------------------------------------------------------------------------
# Combination
def append(self, to_append, verify_integrity=False):
"""
Concatenate two or more Series. The indexes must not overlap
Parameters
----------
to_append : Series or list/tuple of Series
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
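        Examples
        --------
        Illustrative only; the string labels are arbitrary and chosen so the
        indexes do not overlap:
        >>> import pandas as pd
        >>> s1 = pd.Series([1, 2], index=['a', 'b'])
        >>> s2 = pd.Series([3, 4], index=['c', 'd'])
        >>> s1.append(s2).tolist()
        [1, 2, 3, 4]
        >>> s1.append(s2).index.tolist()
        ['a', 'b', 'c', 'd']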
"""
from pandas.tools.merge import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=False,
verify_integrity=verify_integrity)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
combined : Series
"""
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join='outer', copy=False)
new_index = this.index
this_vals = this.values
other_vals = other.values
if fill_value is not None:
this_mask = isnull(this_vals)
other_mask = isnull(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
result = func(this_vals, other_vals)
name = _maybe_match_name(self, other)
return self._constructor(result, index=new_index).__finalize__(self)
def combine(self, other, func, fill_value=nan):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
the other
Parameters
----------
other : Series or scalar value
func : function
fill_value : scalar value
Returns
-------
result : Series
"""
if isinstance(other, Series):
new_index = self.index.union(other.index)
new_name = _maybe_match_name(self, other)
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
new_values[i] = func(lv, rv)
else:
new_index = self.index
new_values = func(self.values, other)
new_name = self.name
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
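        Examples
        --------
        Illustrative only; the NaN in the calling Series is filled from
        `other`:
        >>> import numpy as np
        >>> import pandas as pd
        >>> pd.Series([1.0, np.nan]).combine_first(pd.Series([3.0, 4.0])).tolist()
        [1.0, 4.0]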
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
name = _maybe_match_name(self, other)
rs_vals = com._where_compat(isnull(this), other.values, this.values)
return self._constructor(rs_vals, index=new_index).__finalize__(self)
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index
Parameters
----------
other : Series
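        Examples
        --------
        Illustrative only; only the non-NA positions of `other` overwrite the
        caller:
        >>> import numpy as np
        >>> import pandas as pd
        >>> s = pd.Series([1.0, 2.0, 3.0])
        >>> s.update(pd.Series([4.0, np.nan, 6.0]))
        >>> s.tolist()
        [4.0, 2.0, 6.0]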
"""
other = other.reindex_like(self)
mask = notnull(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher()
#----------------------------------------------------------------------
# Reindexing, sorting
def sort_index(self, ascending=True):
"""
Sort object by labels (along an axis)
Parameters
----------
ascending : boolean or list, default True
Sort ascending vs. descending. Specify list for multiple sort
orders
Examples
--------
>>> result1 = s.sort_index(ascending=False)
>>> result2 = s.sort_index(ascending=[1, 0])
Returns
-------
sorted_obj : Series
"""
index = self.index
if isinstance(index, MultiIndex):
from pandas.core.groupby import _lexsort_indexer
indexer = _lexsort_indexer(index.labels, orders=ascending)
indexer = com._ensure_platform_int(indexer)
new_labels = index.take(indexer)
else:
new_labels, indexer = index.order(return_indexer=True,
ascending=ascending)
new_values = self.values.take(indexer)
return self._constructor(new_values,
index=new_labels).__finalize__(self)
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values
Parameters
----------
axis : int (can only be zero)
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
Returns
-------
argsorted : Series, with -1 indicated where nan values are present
See also
--------
numpy.ndarray.argsort
"""
values = self.values
mask = isnull(values)
if mask.any():
result = Series(
-1, index=self.index, name=self.name, dtype='int64')
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result,
index=self.index).__finalize__(self)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index,
dtype='int64').__finalize__(self)
def rank(self, method='average', na_option='keep', ascending=True,
pct=False):
"""
Compute data ranks (1 through n). Equal values are assigned a rank that
is the average of the ranks of those values
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
na_option : {'keep'}
keep: leave NA values where they are
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : Series
"""
from pandas.core.algorithms import rank
ranks = rank(self.values, method=method, na_option=na_option,
ascending=ascending, pct=pct)
return self._constructor(ranks, index=self.index).__finalize__(self)
def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):
"""
Sort values and index labels by value. This is an inplace sort by default.
Series.order is the equivalent but returns a new Series.
Parameters
----------
axis : int (can only be zero)
ascending : boolean, default True
Sort ascending. Passing False sorts descending
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
inplace : boolean, default True
Do operation in place.
See Also
--------
Series.order
"""
return self.order(ascending=ascending,
kind=kind,
na_position=na_position,
inplace=inplace)
def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False):
"""
Sorts Series object, by value, maintaining index-value link.
This will return a new Series by default. Series.sort is the equivalent but as an inplace method.
Parameters
----------
na_last : boolean (optional, default=True) (DEPRECATED; use na_position)
Put NaN's at beginning or end
ascending : boolean, default True
Sort ascending. Passing False sorts descending
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
inplace : boolean, default False
Do operation in place.
Returns
-------
y : Series
See Also
--------
Series.sort
"""
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError("This Series is a view of some other array, to "
"sort in-place you must create a copy")
if na_last is not None:
warnings.warn(("na_last is deprecated. Please use na_position instead"),
FutureWarning)
na_position = 'last' if na_last else 'first'
def _try_kind_sort(arr):
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind='quicksort')
arr = self.values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isnull(arr)
good = ~bad
idx = np.arange(len(self))
argsorted = _try_kind_sort(arr[good])
if not ascending:
argsorted = argsorted[::-1]
if na_position == 'last':
n = good.sum()
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
elif na_position == 'first':
n = bad.sum()
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def nlargest(self, n=5, take_last=False):
"""Return the largest `n` elements.
Parameters
----------
n : int
Return this many descending sorted values
take_last : bool
Where there are duplicate values, take the last duplicate
Returns
-------
top_n : Series
The n largest values in the Series, in sorted order
Notes
-----
Faster than ``.order(ascending=False).head(n)`` for small `n` relative
to the size of the ``Series`` object.
See Also
--------
Series.nsmallest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nlargest(10) # only sorts up to the N requested
"""
return select_n(self, n=n, take_last=take_last, method='nlargest')
def nsmallest(self, n=5, take_last=False):
"""Return the smallest `n` elements.
Parameters
----------
n : int
Return this many ascending sorted values
take_last : bool
Where there are duplicate values, take the last duplicate
Returns
-------
bottom_n : Series
The n smallest values in the Series, in sorted order
Notes
-----
Faster than ``.order().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
--------
Series.nlargest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nsmallest(10) # only sorts up to the N requested
"""
return select_n(self, n=n, take_last=take_last, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort Series with MultiIndex by chosen level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int or level name, default None
ascending : bool, default True
Returns
-------
sorted : Series
"""
if not isinstance(self.index, MultiIndex):
raise TypeError('can only sort by level with a hierarchical index')
new_index, indexer = self.index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
new_values = self.values.take(indexer)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def swaplevel(self, i, j, copy=True):
"""
Swap levels i and j in a MultiIndex
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : Series
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self.values, index=new_index,
copy=copy).__finalize__(self)
def reorder_levels(self, order):
"""
Rearrange index levels using input order. May not drop or duplicate
levels
Parameters
----------
order: list of int representing new level order.
(reference level by number or key)
axis: where to reorder levels
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception('Can only reorder levels on a hierarchical axis.')
result = self.copy()
result.index = result.index.reorder_levels(order)
return result
def unstack(self, level=-1):
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
Examples
--------
>>> s
one a 1.
one b 2.
two a 3.
two b 4.
>>> s.unstack(level=-1)
a b
one 1. 2.
two 3. 4.
>>> s.unstack(level=0)
one two
a 1. 2.
b 3. 4.
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape import unstack
return unstack(self, level)
#----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function)
Parameters
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
If 'ignore', propagate NA values
Examples
--------
>>> x
one 1
two 2
three 3
>>> y
1 foo
2 bar
3 baz
>>> x.map(y)
one foo
two bar
three baz
Returns
-------
y : Series
same index as caller
"""
values = self.values
if com.is_datetime64_dtype(values.dtype):
values = lib.map_infer(values, lib.Timestamp)
if na_action == 'ignore':
mask = isnull(values)
def map_f(values, f):
return lib.map_infer_mask(values, f, mask.view(np.uint8))
else:
map_f = lib.map_infer
if isinstance(arg, (dict, Series)):
if isinstance(arg, dict):
arg = self._constructor(arg, index=arg.keys())
indexer = arg.index.get_indexer(values)
new_values = com.take_1d(arg.values, indexer)
return self._constructor(new_values,
index=self.index).__finalize__(self)
else:
mapped = map_f(values, arg)
return self._constructor(mapped,
index=self.index).__finalize__(self)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
See also
--------
Series.map: For element-wise operations
Returns
-------
y : Series or DataFrame if func returns a Series
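        Examples
        --------
        Illustrative only, using an arbitrary element-wise function:
        >>> import pandas as pd
        >>> pd.Series([1, 2, 3]).apply(lambda x: x ** 2).tolist()
        [1, 4, 9]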
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype,
index=self.index).__finalize__(self)
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
return f(self)
values = _values_from_object(self)
if com.is_datetime64_dtype(values.dtype):
values = lib.map_infer(values, lib.Timestamp)
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped,
index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
"""
perform a reduction operation
if we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object
"""
delegate = self.values
if isinstance(delegate, np.ndarray):
# Validate that 'axis' is consistent with Series's single axis.
self._get_axis_number(axis)
if numeric_only:
raise NotImplementedError(
'Series.{0} does not implement numeric_only.'.format(name))
return op(delegate, skipna=skipna, **kwds)
return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
numeric_only=numeric_only,
filter_type=filter_type, **kwds)
def _maybe_box(self, func, dropna=False):
"""
evaluate a function with possible input/output conversion if we are i8
Parameters
----------
dropna : bool, default False
whether to drop values if necessary
"""
if dropna:
values = self.dropna().values
else:
values = self.values
if com.needs_i8_conversion(self):
boxer = com.i8_boxer(self)
if len(values) == 0:
return boxer(tslib.iNaT)
values = values.view('i8')
result = func(values)
if com.is_list_like(result):
result = result.map(boxer)
else:
result = boxer(result)
else:
# let the function return nan if appropriate
if dropna:
if len(values) == 0:
return np.nan
result = func(values)
return result
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
# be subclass-friendly
new_values = com.take_1d(self.get_values(), indexer)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
""" check if we do need a multi reindex; this is for compat with
higher dims
"""
return False
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, **kwargs):
return super(Series, self).rename(index=index, **kwargs)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, **kwargs):
return super(Series, self).reindex(index=index, **kwargs)
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Series, self).fillna(value=value, method=method,
axis=axis, inplace=inplace,
limit=limit, downcast=downcast,
**kwargs)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, **kwargs):
return super(Series, self).shift(periods=periods, freq=freq,
axis=axis, **kwargs)
def reindex_axis(self, labels, axis=0, **kwargs):
""" for compatibility with higher dims """
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
return self.reindex(index=labels, **kwargs)
def take(self, indices, axis=0, convert=True, is_copy=False):
"""
return Series corresponding to requested indices
Parameters
----------
indices : list / array of ints
convert : translate negative to positive indices (default)
Returns
-------
taken : Series
See also
--------
numpy.ndarray.take
"""
        # check/convert indices here
if convert:
indices = maybe_convert_indices(
indices, len(self._get_axis(axis)))
indices = com._ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self.values.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def isin(self, values):
"""
Return a boolean :class:`~pandas.Series` showing whether each element
in the :class:`~pandas.Series` is exactly contained in the passed
sequence of ``values``.
Parameters
----------
values : list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
``list`` of one element.
Returns
-------
isin : Series (bool dtype)
Raises
------
TypeError
* If ``values`` is a string
See Also
--------
pandas.DataFrame.isin
Examples
--------
>>> s = pd.Series(list('abc'))
>>> s.isin(['a', 'c', 'e'])
0 True
1 False
2 True
dtype: bool
Passing a single string as ``s.isin('a')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['a'])
0 True
1 False
2 False
dtype: bool
"""
if not com.is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to Series.isin(), you passed a "
"{0!r}".format(type(values).__name__))
# may need i8 conversion for proper membership testing
comps = _values_from_object(self)
if com.is_datetime64_dtype(self):
from pandas.tseries.tools import to_datetime
values = Series(to_datetime(values)).values.view('i8')
comps = comps.view('i8')
elif com.is_timedelta64_dtype(self):
from pandas.tseries.timedeltas import to_timedelta
values = Series(to_timedelta(values)).values.view('i8')
comps = comps.view('i8')
value_set = set(values)
result = lib.ismember(comps, value_set)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right. NA values
will be treated as False
Parameters
----------
left : scalar
Left boundary
right : scalar
Right boundary
Returns
-------
is_between : Series
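        Examples
        --------
        Illustrative only; boundaries are inclusive by default:
        >>> import pandas as pd
        >>> pd.Series([1, 2, 3, 4]).between(2, 3).tolist()
        [False, True, True, False]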
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
index_col=0, encoding=None, infer_datetime_format=False):
"""
Read delimited file into Series
Parameters
----------
path : string file path or file handle / StringIO
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
        header : int, default None
            Row to use as header (skip prior rows)
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
y : Series
"""
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(path, header=header, index_col=index_col,
sep=sep, parse_dates=parse_dates,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
result = df.icol(0)
result.index.name = result.name = None
return result
def to_csv(self, path, index=True, sep=",", na_rep='',
float_format=None, header=False,
index_label=None, mode='w', nanRep=None, encoding=None,
date_format=None, decimal='.'):
"""
Write Series to a comma-separated values (csv) file
Parameters
----------
path : string file path or file handle / StringIO. If None is provided
the result is returned as a string.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
header : boolean, default False
Write out series name
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
mode : Python write mode, default 'w'
sep : character, default ","
Field delimiter for the output file.
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
date_format: string, default None
Format string for datetime objects.
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for European data
"""
from pandas.core.frame import DataFrame
df = DataFrame(self)
# result is only a string if no path provided, otherwise None
result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
float_format=float_format, header=header,
index_label=index_label, mode=mode, nanRep=nanRep,
encoding=encoding, date_format=date_format, decimal=decimal)
if path is None:
return result
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return Series without null values
        Parameters
        ----------
        inplace : boolean, default False
            Do operation in place.
        Returns
        -------
        valid : Series
"""
kwargs.pop('how', None)
if kwargs:
raise TypeError('dropna() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axis = self._get_axis_number(axis or 0)
result = remove_na(self)
if inplace:
self._update_inplace(result)
else:
return result
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
**kwargs)
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
mask = isnull(self.values)
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[i]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
mask = isnull(self.values[::-1])
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[len(self) - i - 1]
#----------------------------------------------------------------------
# Time series-oriented methods
def asof(self, where):
"""
Return last good (non-NaN) value in TimeSeries if value is NaN for
requested date.
If there is no good value, NaN is returned.
Parameters
----------
where : date or array of dates
Notes
-----
Dates are assumed to be sorted
Returns
-------
value or NaN
"""
if isinstance(where, compat.string_types):
where = datetools.to_datetime(where)
values = self.values
if not hasattr(where, '__iter__'):
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
return np.nan
loc = self.index.searchsorted(where, side='right')
if loc > 0:
loc -= 1
while isnull(values[loc]) and loc > 0:
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where)
locs = self.index.asof_locs(where, notnull(values))
new_values = com.take_1d(values, locs)
return self._constructor(new_values, index=where).__finalize__(self)
def to_timestamp(self, freq=None, how='start', copy=True):
"""
Cast to datetimeindex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
Returns
-------
ts : TimeSeries with DatetimeIndex
"""
new_values = self.values
if copy:
new_values = new_values.copy()
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
"""
Convert TimeSeries from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_values = self.values
if copy:
new_values = new_values.copy()
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values,
index=new_index).__finalize__(self)
#------------------------------------------------------------------------------
# Datetimelike delegation methods
def _make_dt_accessor(self):
try:
return maybe_to_datetimelike(self)
except Exception:
raise AttributeError("Can only use .dt accessor with datetimelike "
"values")
dt = base.AccessorProperty(CombinedDatetimelikeProperties, _make_dt_accessor)
#------------------------------------------------------------------------------
# Categorical methods
def _make_cat_accessor(self):
if not com.is_categorical_dtype(self.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
return CategoricalAccessor(self.values, self.index)
cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor)
def _dir_deletions(self):
return self._accessors
def _dir_additions(self):
rv = set()
# these accessors are mutually exclusive, so break loop when one exists
for accessor in self._accessors:
try:
getattr(self, accessor)
rv.add(accessor)
break
except AttributeError:
pass
return rv
Series._setup_axes(['index'], info_axis=0, stat_axis=0,
aliases={'rows': 0})
Series._add_numeric_operations()
_INDEX_TYPES = ndarray, Index, list, tuple
#------------------------------------------------------------------------------
# Supplementary functions
def remove_na(series):
"""
Return series containing only true/non-NaN values, possibly empty.
"""
return series[notnull(_values_from_object(series))]
def _sanitize_index(data, index, copy=False):
""" sanitize an index type to return an ndarray of the underlying, pass thru a non-Index """
if len(data) != len(index):
raise ValueError('Length of values does not match length of '
'index')
if isinstance(data, PeriodIndex):
data = data.asobject
elif isinstance(data, DatetimeIndex):
data = data._to_embed(keep_tz=True)
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M','m']:
data = _sanitize_array(data, index, copy=copy)
return data
def _sanitize_array(data, index, dtype=None, copy=False,
raise_cast_failure=False):
""" sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
arr = _possibly_cast_to_datetime(arr, dtype)
subarr = np.array(arr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if com.is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
if (com.is_datetime64_dtype(data.dtype) and
not com.is_datetime64_dtype(dtype)):
if dtype == object:
ints = np.asarray(data).view('i8')
subarr = tslib.ints_to_pydatetime(ints)
elif raise_cast_failure:
raise TypeError('Cannot cast datetime64 to %s' % dtype)
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
value = subarr[0]
subarr = np.empty(len(index), dtype=subarr.dtype)
subarr.fill(value)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
# backwards compatibility
TimeSeries = Series
#----------------------------------------------------------------------
# Add plotting methods to Series
import pandas.tools.plotting as _gfx
Series.plot = _gfx.plot_series
Series.hist = _gfx.hist_series
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)
ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
| mit |
asimshankar/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
malvikasharan/APRICOT | apricotlib/apricot_visualization.py | 1 | 22211 | #!/usr/bin/env python
# Description = Visualizes different output data from APRICOT analysis
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
try:
import subprocess
except ImportError:
print('Python package subprocess is missing. Please install/update.\n')
sys.exit(0)
try:
import shutil
except ImportError:
print('Python package shutil is missing. Please install/update.\n')
sys.exit(0)
class VizApricotAnalysis(object):
def __init__(self, annotation_scoring_data,
domain_file,
additional_annotation,
outpath):
self._annotation_scoring_data = annotation_scoring_data
self._domain_file = domain_file
self._additional_annotation = additional_annotation
self._outpath = outpath
self._sec_str = self._outpath+'/secondary_structure'
self._dom_highlight = self._outpath+'/domain_highlighting'
self._pdb_msa = self._outpath+'/homologous_pdb_msa'
self._overview = self._outpath+'/overview_and_statistics'
self._localize = self._outpath+'/subcellular_localization'
self._annotation_data = []
self._filter_viz_dict = {}
self._highlight_dict = {}
self._uid_key_dict = {}
self._dom_rank = {}
self._fasta_dict = {}
self._secstr_dict = {}
self._dom_annotation = {}
self._location_dict = defaultdict(lambda: defaultdict(lambda: []))
self._sec_str_color = {'H': '#FF6666', 'E': '#33CCCC', 'C': '#FFFFCC'}
self._localization_dict = defaultdict(
lambda: defaultdict(lambda: float))
self._color_list = (
"Blue", "Green", "Teal", "Lime", "SeaGreen", "MediumTurquoise",
"Pink", "DarkOliveGreen", "Indigo", "Orange", "SlateBlue",
"LawnGreen", "Brown", "LightSkyBlue", "LightGreen", "DarkOrchid",
"GoldenRod", "MidnightBlue", "LightPink", "Gold")
def viz_all_the_visualization_files(self):
self.viz_domain_data()
self.domain_highlight()
self.viz_annotation_scoring()
self.viz_secondary_structure()
self.viz_subcellular_localization()
self.viz_homologous_pdb_msa()
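    # A minimal usage sketch; the file and directory names below are
    # hypothetical placeholders, the real paths come from an APRICOT run:
    #
    #   viz = VizApricotAnalysis('annotation_scoring.csv',
    #                            'selected_domains.tab',
    #                            'additional_annotation',
    #                            'visualization_output')
    #   viz.viz_all_the_visualization_files()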
def viz_domain_data(self):
with open(self._domain_file, 'r') as in_fh:
for entry in in_fh:
if not entry.startswith('Entry'):
domain_info = DomainDataColumns(
entry.strip().split('\t'))
prot_name = domain_info.entry_name
prot_end = int(domain_info.length)-1
prot_key = '\n'.join(
["\tstart: 0,", "\tend: %s,"
% prot_end, '\tname: "%s",' % prot_name,
'\thref: "http://www.uniprot.org/uniprot/%s"'
% domain_info.uid])
self._uid_key_dict[domain_info.uid] = prot_key
self._location_dict[
domain_info.uid][domain_info.domain_id].append(
'\t{start: %s, end: %s}' % (
domain_info.start, domain_info.stop))
self._dom_annotation[
domain_info.domain_id] = domain_info.full_name
src = domain_info.resource
if src == 'CDD':
self._dom_rank.setdefault(
domain_info.uid+':CDD', []).append(
domain_info.domain_id)
self._highlight_dict.setdefault(
prot_key, []).append('\n'.join(
['\t\tstart: %s,' % domain_info.start,
'\t\tend: %s,' % domain_info.stop,
'\t\tdomain: {', '\t\t\tname: "%s",'
% domain_info.domain_id,
'\t\t\tid: %s,' % len(
self._dom_rank[domain_info.uid+':CDD']),
'\t\t\tdescription: "%s"},' %
domain_info.short_name,
'\t\tsource: {', '\t\t\tname: "CDD",',
'\t\t\thref: null,', '\t\t\tid: 1}']))
else:
self._dom_rank.setdefault(
domain_info.uid+':IPR', []).append(
domain_info.domain_id)
self._highlight_dict.setdefault(
prot_key, []).append('\n'.join(
['start: %s,' % domain_info.start,
'end: %s,' % domain_info.stop,
'domain: {', '\t\tname: "%s",' %
domain_info.domain_id,
'\t\tid: %s,' % len(
self._dom_rank[domain_info.uid+':IPR']),
'\t\tdescription: "%s"},' % domain_info.short_name,
'source: {', '\t\tname: "InterPro",',
'\t\thref: null,', '\t\tid: 2}']))
return self._uid_key_dict, self._location_dict, self._dom_annotation, self._dom_highlight, self._highlight_dict
def domain_highlight(self):
for uid in self._uid_key_dict.keys():
            header = '\n'.join(['<meta charset="UTF-8">',
                                '<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/biojs-vis-protein-viewer@0.1.4">',
'<script src="https://wzrd.in/bundle/biojs-vis-protein-viewer@0.1.4"></script>',
'<div id="j-main">', '</div>', '<script>',
'var ProteinViewer = require("biojs-vis-protein-viewer");'])
body = '\n'.join(['var highlightData = [', '\t{',
'\n\t},\n\t{\n'.join(self._highlight_dict[
self._uid_key_dict[uid]]), '\t}', '];'])
panel = '\n'.join(['var highlightLocusData = {',
self._uid_key_dict[uid], '};'])
footer = '\n'.join([
'var pv = new ProteinViewer({',
'\tel: document.getElementById("j-main"),',
'\tdata: highlightData,',
'\tlocusData: highlightLocusData', '});',
'pv.render();', '</script>'])
with open(self._dom_highlight+'/%s.html' % uid, 'w') as out_fh:
out_fh.write('\n'.join([header, body, panel, footer]))
def viz_annotation_scoring(self):
if os.path.exists(self._annotation_scoring_data):
with open(self._annotation_scoring_data, 'r') as in_fh:
for entry in in_fh:
if not entry.startswith('Entry'):
self._filter_viz_dict.setdefault('filter1_list', []).append(
float(entry.strip().split('\t')[-5]))
self._filter_viz_dict.setdefault('filter2_list', []).append(
float(entry.strip().split('\t')[-4]))
self._filter_viz_dict.setdefault('filter3_list', []).append(
float(entry.strip().split('\t')[-3]))
self._filter_viz_dict.setdefault('filter4_list', []).append(
float(entry.strip().split('\t')[-2]))
self._filter_viz_dict.setdefault('bayscore_list', []).append(
float(entry.strip().split('\t')[-1]))
try:
label_list = range(0, len(self._filter_viz_dict['bayscore_list']))
plt.plot(sorted(self._filter_viz_dict['filter1_list']), 'ro', label='Filter-1 Score')
plt.plot(sorted(self._filter_viz_dict['filter2_list']), 'ys', label='Filter-2 Score')
plt.plot(sorted(self._filter_viz_dict['filter3_list']), 'g8', label='Filter-3 Score')
plt.plot(sorted(self._filter_viz_dict['filter4_list']), 'mp', label='Filter-4 Score')
plt.plot(sorted(self._filter_viz_dict['bayscore_list']), 'b^', label='Bayesian Score')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.xticks(label_list)
plt.xlabel('Annotation scores of selected proteins')
plt.ylabel('Filter/Bayesian score')
plt.savefig(os.path.join(self._overview, 'viz_annotation_scoring.png'))
except KeyError:
print("!!! The annotation scoring file seems to be empty."
" Please reanalyse annotation score using the subcommand 'annoscore' !!!")
else:
            print('The data for annotation scores do not exist, '
                  'please calculate the annotation score using the subcommand '
                  '"annoscore"; the flag "-nd" can be used to specify the absolute path for needle.')
def viz_secondary_structure(self):
for uid in self._uid_key_dict.keys():
if uid+'.horiz' in os.listdir(
self._additional_annotation+'/protein_secondary_structure/'):
files = uid+'.horiz'
elif uid+'.plain' in os.listdir(
self._additional_annotation+'/protein_secondary_structure/'):
files = uid+'.plain'
print("\nRaptorX secondary structure files are unavailable.")
print("Visualizing secondary structure using literature based analysis.\n")
else:
print("\nRaptorX/literature-based secondary structure files are unavailable.")
print("Exiting the current analysis.")
print("Please re-run the secondary structure prediction by RaptorX\n")
return
secstr_list = []
uid_secstr_dict = {}
sec_data_sites = []
with open(self._additional_annotation+
'/protein_secondary_structure/'+files, 'r') as in_fh:
for entry in in_fh:
if 'AA: ' in entry:
self._fasta_dict.setdefault(uid,
[]).append(entry.strip().split('AA: ')[1])
if 'Pred: ' in entry:
try:
secstr_list.append(entry.strip().split('Pred: ')[1])
except IndexError:
print("\nRaptorX output file is incomplete. Exiting the current analysis.")
print("Please re-run the secondary structure prediction by RaptorX\n")
return
for i, pred_data in enumerate(''.join(secstr_list)):
uid_secstr_dict[i] = pred_data
for j in range(len(uid_secstr_dict)-1):
if j == 0:
sec_data_sites.append(j)
if not uid_secstr_dict[j] == uid_secstr_dict[j+1]:
sec_data_sites.append(j+1)
self._secstr_dict.setdefault(uid, []).append(
'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
%(int(sec_data_sites[-2])+1, int(j)+1,
self._sec_str_color[uid_secstr_dict[j]]))
self._secstr_dict.setdefault(uid, []).append(
'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
%(int(sec_data_sites[-1])+1, int(list(uid_secstr_dict.keys())[-1])+1,
self._sec_str_color[uid_secstr_dict[j]]))
self.sec_str_script()
def sec_str_script(self):
for uid in self._fasta_dict.keys():
header = '\n'.join(['<meta charset="UTF-8">',
'<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>',
'<script src="https://wzrd.in/bundle/biojs-vis-sequence@0.1.7"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
'<div id="snippetDiv"></div>', '<script>',
'var yourDiv = document.getElementById("snippetDiv");',
'var Seq = require("biojs-vis-sequence");'])
footer = '\n'.join([
'mySequence.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
'mySequence.onAll(function(name,data){',
'console.log(arguments);', '});', '};',
'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
'</script>'])
body1 = '\n'.join(['var theSequence = "%s";' %
''.join(self._fasta_dict[uid]), 'yourDiv.textContent = "";',
'window.onload = function() {', 'var mySequence = new Seq({',
'\tsequence : theSequence,', '\ttarget : yourDiv.id,',
'\tformat : "CODATA",', '\tformatOptions : {',
'\ttitle:false,', '\tfooter:false', '\t},', '\tid : "%s"' % uid, '});'])
body2 = '\n'.join(self._secstr_dict[uid])
dom_list = sorted(list(self._location_dict[uid].keys()))
annotation_list = []
for dom_id in dom_list:
dom_idx = dom_list.index(dom_id)
annotation_list.append('\n'.join([
'mySequence.addAnnotation({', 'name:"Domain-%s",' % str(int(dom_idx)+1),
'html:"<br>%s<br>%s</b>",' % (dom_id,
self._dom_annotation[dom_id]), 'color:"%s",' % self._color_list[dom_idx],
'regions: [', ',\n'.join(self._location_dict[uid][dom_id]), ']});']))
with open(self._sec_str+'/'+uid+'.html', 'w') as out_fh:
out_fh.write('\n'.join([header, body1, '\n'.join(annotation_list),
body2, footer]))
def viz_subcellular_localization(self):
        '''Summarize PsortB localization predictions into a table and heatmap.'''
if 'psortb_data_summary.csv' in os.listdir(
self._additional_annotation+'/protein_localization'):
total_loc = set()
with open(
self._additional_annotation+'/protein_localization/psortb_data_summary.csv',
'r') as in_fh:
for entry in in_fh:
                    if 'Localization' not in entry:
protein = entry.strip().split('\t')[0]
localization = entry.strip().split('\t')[1]
if not localization.lower() == 'unknown':
score = float(entry.strip().split('\t')[2])
self._localization_dict[protein][localization] = score
total_loc.add(localization)
with open(self._localize+'/localization_table.csv', 'w') as out_fh:
out_fh.write('Proteins\t%s\n' % '\t'.join(sorted(list(total_loc))))
for each_prot in self._localization_dict.keys():
for localization in self._localization_dict[each_prot]:
entry_list = list('0'*len(total_loc))
loc_idx = sorted(list(total_loc)).index(localization)
entry_list[loc_idx] = self._localization_dict[each_prot][localization]
out_fh.write("%s\t%s\n" % (each_prot, '\t'.join(map(str, entry_list))))
self._create_localization_heatmap()
else:
print("\nPsortB-based localization prediction files are unavailable.")
print("Exiting the current analysis.")
print("Please re-run the localization prediction by PsortB\n")
return
def _create_localization_heatmap(self):
        '''Plot the localization table as a heatmap via a generated R script.'''
plot_file = self._localize+'/localization_heatmap.pdf'
infile = self._localize+'/localization_table.csv'
with open(self._localize+'/localization_heatmap.R', 'w') as r_fh:
r_fh.write('\n'.join(['library(gplots)', 'library(RColorBrewer)', 'display.brewer.all()',
'data <- read.csv("%s", header=T, sep = "\\t")' % infile,
'rnames <- data[,1]', 'data_matrix <- data.matrix(data[,2:ncol(data)])',
'data_matrix[is.na(data_matrix)] <- 0', 'data_matrix[is.nan(data_matrix)] <- 0',
'data_matrix[is.infinite(data_matrix)] <- max(data_matrix)',
'rownames(data_matrix) <- rnames', 'pdf(file="%s")' % plot_file,
'out_map <- heatmap.2(data_matrix, dendrogram = "none", Rowv = FALSE, \
Colv = FALSE, col=brewer.pal(9,"YlGn"), margins=c(5,8), \
cexCol=0.8, cexRow=0.8, key.title="PsortB Pred-value", key.xlab="", key.ylab="")',
'dev.off()']))
subprocess.Popen(['Rscript %s/localization_heatmap.R' %
self._localize], shell=True).wait()
def viz_homologous_pdb_msa(self):
header = '\n'.join(['<meta charset="UTF-8">',
'<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/msa@0.4.8">',
'<script src="https://wzrd.in/bundle/msa@0.4.8"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-clustal@latest"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-gff@latest"></script>',
'<script src="https://wzrd.in/bundle/xhr@latest"></script>',
'<div id="snippetDiv"></div>', '<script>',
'var rootDiv = document.getElementById("snippetDiv");',
'var msa = require("msa");', 'var menuDiv = document.createElement("div");',
'var msaDiv = document.createElement("div");',
'rootDiv.appendChild(menuDiv);', 'rootDiv.appendChild(msaDiv);'])
footer = '\n'.join(['opts.conf = {', '\tdropImport: true,',
'\tmanualRendering: true', '};', 'opts.vis = {', '\tconserv: false,',
'\toverviewbox: false,', '\tseqlogo: true,', '\tmetacell: true', '};',
'opts.zoomer = {', '\tlabelIdLength: 20', '};', 'var m = msa(opts);',
'gg = m;', 'm.u.file.importURL(url, function() {',
'\tvar defMenu = new msa.menu.defaultmenu({', '\t\tel: menuDiv,',
'\t\tmsa: m', '\t});', '\tdefMenu.render();', '\tm.render();', '});',
'm.g.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
'</script>'])
body = '//EDIT PATH\n'.join([
'var url = "https://github.com/malvikasharan/APRICOT/blob/master/Biojs_dependencies/data/biojs_msa_tab.clustal";'
'var opts = {', '\tel: msaDiv', '};'])
with open(self._pdb_msa+'/Biojs_pdb_msa_tab.html', 'w') as out_fh:
out_fh.write('\n'.join([header, body, footer]))
for files in os.listdir(self._additional_annotation+'/pdb_sequence_prediction/'):
if '_top5.fasta' in files:
shutil.copyfile(
self._additional_annotation+'/pdb_sequence_prediction/'+files,
self._pdb_msa+'/'+files)
subprocess.Popen(['bin/reference_db_files/clustal/clustalw2 %s' %
self._pdb_msa+'/'+files], shell=True).wait()
print("\nPlease open the BioJS MSA tab generated in Biojs_pdb_msa_tab.html.")
print("Import MSA files (.aln) in the BioJS MSA tab to visualize the alignment.\n")
class AnnotationScoringColumns(object):
'''Column information of annotation scoring file'''
def __init__(self, row):
self.uid = row[0]
self.entry_name = row[1]
self.prot_name = row[2]
self.species = row[3]
self.length = row[4]
self.resource = row[5]
self.resource_id = row[6]
self.domain_id = row[7]
self.short_name = row[8]
self.full_name = row[9]
self.domain_length = row[10]
self.start = row[11]
self.stop = row[12]
self.ref_seq = row[13]
self.q_seq = row[14]
self.ref_ss = row[15]
self.q_ss = row[16]
self.mol_mass = row[17]
self.iso_pt = row[18]
self.solub = row[19]
self.vdw = row[20]
self.coverage = row[21]
self.cov_by_dom = row[22]
self.identity = row[23]
self.iden_by_cov = row[24]
self.similarity = row[25]
self.sim_by_cov = row[26]
self.gap = row[27]
self.gap_by_cov = row[28]
self.AA_RO = row[29]
self.SS_RO = row[30]
self.PC_RO = row[31]
self.AAC_ED = row[32]
self.PCC_ED = row[33]
self.DPC_ED = row[34]
self.TPC_ED = row[35]
class DomainDataColumns(object):
'''Column information of domain annotation file'''
def __init__(self, row):
self.uid = row[0]
self.entry_name = row[1]
self.prot_name = row[2]
self.species = row[3]
self.length = row[4]
self.gene_name = row[5]
self.locus_tag = row[6]
self.existance = row[7]
self.go = row[8]
self.embl_id = row[9]
self.pdb_id = row[10]
self.kegg_id = row[11]
self.interpro_id = row[12]
self.pfam_id = row[13]
self.pubmed_id = row[14]
self.resource = row[15]
self.resource_id = row[16]
self.domain_id = row[17]
self.short_name = row[18]
self.full_name = row[19]
self.dom_kw = row[20]
self.dom_go = row[21]
self.members = row[22]
self.dom_len = row[23]
self.start = row[24]
self.stop = row[25]
self.evalue = row[26]
self.bitscore = row[27]
self.bits = row[28]
self.cover_len = row[29]
self.cov_prcnt = row[30]
self.identity = row[31]
self.iden_prcnt = row[32]
self.similarity = row[33]
self.sim_prcnt = row[34]
self.gaps = row[35]
self.gap_prcnt = row[36]
self.filter_tag = row[37]
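# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original APRICOT module): how the
# VizApricotAnalysis class above could be driven end-to-end. All file and
# folder paths below are hypothetical placeholders; the call is wrapped in a
# function so that importing this module never starts an analysis run.
def _example_visualization_run():
    viz = VizApricotAnalysis(
        'output/annotation_scoring.csv',    # assumed annotation scoring table
        'output/selected_domains.csv',      # assumed domain annotation table
        'output/additional_annotation',     # assumed RaptorX/PsortB/PDB folder
        'output/visualization')             # output folder with the expected subfolders
    viz.viz_all_the_visualization_files()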
| isc |
jm-begon/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
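# A minimal sketch (not part of scikit-learn) of how the data home is
# resolved: either through the SCIKIT_LEARN_DATA environment variable or by
# passing an explicit path. Both paths below are arbitrary examples; note
# that get_data_home creates the folder if it does not exist yet.
def _example_data_home_resolution():
    import os
    os.environ['SCIKIT_LEARN_DATA'] = '/tmp/sklearn_cache'
    cache_dir = get_data_home()                  # -> '/tmp/sklearn_cache'
    explicit_dir = get_data_home('/tmp/other_cache')
    return cache_dir, explicit_dir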
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
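# Illustrative sketch (not part of scikit-learn): loading a small text corpus
# laid out as container/category_x/file.txt with load_files and turning it
# into a bag-of-words matrix. The folder name and encoding are assumptions
# made only for this example.
def _example_load_files_corpus():
    from sklearn.feature_extraction.text import CountVectorizer
    corpus = load_files('txt_corpus', encoding='utf-8')   # hypothetical folder
    X = CountVectorizer().fit_transform(corpus.data)
    return X, corpus.target, corpus.target_names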
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
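# Short sketch (not part of scikit-learn): the diabetes bunch can be passed
# straight to any regressor; an ordinary least-squares fit is used here purely
# as an illustration of the data/target layout.
def _example_load_diabetes_fit():
    from sklearn.linear_model import LinearRegression
    bunch = load_diabetes()
    model = LinearRegression().fit(bunch.data, bunch.target)
    return model.score(bunch.data, bunch.target)   # R^2 on the training data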
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
joernhees/scikit-learn | examples/linear_model/plot_sgd_iris.py | 58 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/linear_model/_ridge.py | 2 | 77132 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from ._base import LinearClassifierMixin, LinearModel, _rescale_data
from ._sag import sag_solver
from ..base import RegressorMixin, MultiOutputMixin, is_classifier
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..utils.validation import _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics import check_scoring
from ..exceptions import ConvergenceWarning
from ..utils.sparsefuncs import mean_variance_axis
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0,
X_offset=None, X_scale=None):
def _get_rescaled_operator(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X1 = sparse.linalg.LinearOperator(shape=X.shape,
matvec=matvec,
rmatvec=rmatvec)
return X1
n_samples, n_features = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X1 = _get_rescaled_operator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
# FIXME atol
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
# old scipy
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
# FIXME atol
try:
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol, atol='legacy')
except TypeError:
# old scipy
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info, ConvergenceWarning)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
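# A brief numpy sketch (not part of scikit-learn) of why the kernel path above
# is valid: for ridge regression the primal solution (X.T X + alpha I)^-1 X.T y
# equals the dual form X.T (X X.T + alpha I)^-1 y, which is what
# _solve_cholesky_kernel evaluates from the Gram matrix K = X X.T.
def _check_primal_dual_equivalence(alpha=0.3, seed=1):
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 40)                      # n_samples < n_features
    y = rng.randn(10, 1)
    primal = np.linalg.solve(X.T @ X + alpha * np.eye(40), X.T @ y)
    dual = X.T @ np.linalg.solve(X @ X.T + alpha * np.eye(10), y)
    return np.allclose(primal, dual)           # expected to be True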
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
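# A small numerical sanity sketch (not part of scikit-learn): for a single
# penalty the SVD expression used above, w = V diag(s / (s**2 + alpha)) U.T y,
# agrees with the normal-equations form w = (X.T X + alpha I)^-1 X.T y.
def _check_svd_against_normal_equations(alpha=0.5, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 5)
    y = rng.randn(20, 1)
    w_svd = _solve_svd(X, y, np.array([alpha]))
    w_normal = np.linalg.solve(X.T @ X + alpha * np.eye(5), X.T @ y).T
    return np.allclose(w_svd, w_normal)        # expected to be True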
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ['auto', 'sag', 'saga']:
return 'csr'
else:
return ['csr', 'csc', 'coo']
@_deprecate_positional_args
def ridge_regression(X, y, alpha, *, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False,
check_input=True):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {ndarray, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
alpha : float or array-like of shape (n_targets,)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        The last five solvers support both dense and sparse data. However, only
        'sag' and 'sparse_cg' support sparse input when `fit_intercept` is
        True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
        For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default
        value is 1000.
tol : float, default=1e-3
Precision of the solution.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
return _ridge_regression(X, y, alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input)
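# Illustrative call (not part of scikit-learn's test suite): solving a small
# ridge problem through the functional interface above. As documented, the
# function does not fit an intercept, so the data are centered by hand; the
# toy coefficients are arbitrary.
def _example_ridge_regression_call():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.randn(50)
    X_centered, y_centered = X - X.mean(axis=0), y - y.mean()
    coef = ridge_regression(X_centered, y_centered, alpha=1.0,
                            solver='cholesky')
    return coef                                # ndarray of shape (3,)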
def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False,
X_scale=None, X_offset=None, check_input=True):
has_sw = sample_weight is not None
if solver == 'auto':
if return_intercept:
# only sag supports fitting intercept directly
solver = "sag"
elif not sparse.issparse(X):
solver = "cholesky"
else:
solver = "sparse_cg"
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd'"
" 'lsqr', 'sag' or 'saga'. Got %s." % solver)
if return_intercept and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the "
"intercept. Please change solver to 'sag' or set "
"return_intercept=False.")
if check_input:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype,
order="C")
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha,
max_iter=max_iter,
tol=tol,
verbose=verbose,
X_offset=X_offset,
X_scale=X_scale)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver in ['sag', 'saga']:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ), dtype=X.dtype)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1),
dtype=X.dtype)}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init, is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
@abstractmethod
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
# all other solvers work at both float precision levels
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),
self.solver)
X, y = self._validate_data(X, y,
accept_sparse=_accept_sparse,
dtype=_dtype,
multi_output=True, y_numeric=True)
if sparse.issparse(X) and self.fit_intercept:
if self.solver not in ['auto', 'sparse_cg', 'sag']:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'sparse_cg', 'sag', or set `fit_intercept=False`"
.format(self.solver))
if (self.solver == 'sag' and self.max_iter is None and
self.tol > 1e-4):
warnings.warn(
'"sag" solver requires many iterations to fit '
'an intercept with sparse inputs. Either set the '
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
'not standardized).')
solver = 'sag'
else:
solver = 'sparse_cg'
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight, return_mean=True)
if solver == 'sag' and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver='sag',
random_state=self.random_state, return_n_iter=True,
return_intercept=True, check_input=False)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg solver
params = {'X_offset': X_offset, 'X_scale': X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_ = _ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False, check_input=False, **params)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
"""Linear least squares with l2 regularization.
Minimizes the objective function::
||y - Xw||^2_2 + alpha * ||w||^2_2
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape (n_samples, n_targets)).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, ndarray of shape (n_targets,)}, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
fit_intercept : bool, default=True
Whether to fit the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. ``X`` and ``y`` are expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
tol : float, default=1e-3
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        The last five solvers support both dense and sparse data. However, only
        'sag' and 'sparse_cg' support sparse input when `fit_intercept` is
        True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.17
`random_state` to support Stochastic Average Gradient.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See Also
--------
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
:class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
combines ridge regression with the kernel trick.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge()
"""
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : returns an instance of self.
"""
return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-3
Precision of the solution.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
The classes labels.
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : object
Instance of the estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),
self.solver)
X, y = self._validate_data(X, y, accept_sparse=_accept_sparse,
multi_output=True, y_numeric=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super().fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
def _check_gcv_mode(X, gcv_mode):
possible_gcv_modes = [None, 'auto', 'svd', 'eigen']
if gcv_mode not in possible_gcv_modes:
raise ValueError(
"Unknown value for 'gcv_mode'. "
"Got {} instead of one of {}" .format(
gcv_mode, possible_gcv_modes))
if gcv_mode in ['eigen', 'svd']:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return 'svd'
return 'eigen'
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
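# Hedged illustration (not part of scikit-learn): a tiny check of how
# _find_smallest_angle locates the column most aligned with a query. The helper
# name _demo_find_smallest_angle and the toy data below are made up for this
# sketch only.
def _demo_find_smallest_angle():
    import numpy as np
    rng = np.random.RandomState(0)
    query = np.ones(4) / 2.0  # unit-norm query vector
    # build an orthonormal basis whose first column is (up to sign) the query
    Q, _ = np.linalg.qr(np.hstack([query[:, None], rng.randn(4, 3)]))
    # the absolute cosine is 1 for column 0 and ~0 for the other columns
    assert _find_smallest_angle(query, Q) == 0
    return Q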
class _X_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as centered and scaled X with an added intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_samples, n_features + 1))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
return safe_sparse_dot(
self.X, v[:-1], dense_output=True
) - self.sqrt_sw * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw
def _matmat(self, v):
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True) -
self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + v[-1] *
self.sqrt_sw[:, None])
def _transpose(self):
return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
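# Hedged sketch (not part of scikit-learn): verify on a small example that
# _X_CenterStackOp acts like the explicit matrix
# np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) described in its
# docstring. The demo name and toy data are illustrative only.
def _demo_center_stack_op():
    import numpy as np
    from scipy import sparse as sp
    rng = np.random.RandomState(0)
    X = sp.csr_matrix(rng.randn(5, 3))
    sqrt_sw = np.ones(5)                       # unit sample weights
    X_mean = np.asarray(X.mean(axis=0)).ravel()
    op = _X_CenterStackOp(X, X_mean, sqrt_sw)
    v = rng.randn(4)                           # n_features + 1 entries
    dense = np.hstack([X.toarray() - sqrt_sw[:, None] * X_mean,
                       sqrt_sw[:, None]])
    assert np.allclose(op.matvec(v), dense.dot(v))
    return op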
class _XT_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as transposed centered and scaled X with an intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_features + 1, n_samples))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
n_features = self.shape[0]
res = np.empty(n_features, dtype=self.X.dtype)
res[:-1] = (
safe_sparse_dot(self.X.T, v, dense_output=True) -
(self.X_mean * self.sqrt_sw.dot(v))
)
res[-1] = np.dot(v, self.sqrt_sw)
return res
def _matmat(self, v):
n_features = self.shape[0]
res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
res[:-1] = (
safe_sparse_dot(self.X.T, v, dense_output=True) -
self.X_mean[:, None] * self.sqrt_sw.dot(v)
)
res[-1] = np.dot(self.sqrt_sw, v)
return res
class _IdentityRegressor:
"""Fake regressor which will directly output the prediction."""
def decision_function(self, y_predict):
return y_predict
def predict(self, y_predict):
return y_predict
class _IdentityClassifier(LinearClassifierMixin):
"""Fake classifier which will directly output the prediction.
We inherit from LinearClassifierMixin to get the proper shape for the
output `y`.
"""
def __init__(self, classes):
self.classes_ = classes
def decision_function(self, y_predict):
return y_predict
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Leave-one-out Cross-Validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id).
Dual solution: c = G^-1y
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G^-1)
The best score (negative mean squared error or user-provided scoring) is
stored in the `best_score_` attribute, and the selected hyperparameter in
`alpha_`.
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *,
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False,
is_clf=False, alpha_per_target=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.is_clf = is_clf
self.alpha_per_target = alpha_per_target
@staticmethod
def _decomp_diag(v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
@staticmethod
def _diag_dot(D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples))
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(
X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m
- X_mX - X_mX.T, X_mean)
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples))
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (safe_sparse_dot(X.T, X, dense_output=True) -
weight_sum * np.outer(X_mean, X_mean),
X_mean)
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
without explicitely centering X nor computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
        sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept),
dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].A
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
def _eigen_decompose_gram(self, X, y, sqrt_sw):
"""Eigendecomposition of X.X^T, used when n_samples <= n_features."""
# if X is dense it has already been centered in preprocessing
K, X_mean = self._compute_gram(X, sqrt_sw)
if self.fit_intercept:
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
K += np.outer(sqrt_sw, sqrt_sw)
eigvals, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return X_mean, eigvals, Q, QT_y
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1. / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
# when all samples have the same weight we add a column of 1
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight), e.g. n when uniform sample weights.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
# return (1 - hat_diag), (y - y_hat)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X)
def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
# X already centered
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
if self.fit_intercept:
# to emulate fit_intercept=True situation, add a column
# containing the square roots of the sample weights
# by centering, the other columns are orthogonal to that one
intercept_column = sqrt_sw[:, None]
X = np.hstack((X, intercept_column))
U, singvals, _ = linalg.svd(X, full_matrices=0)
singvals_sq = singvals ** 2
UT_y = np.dot(U.T, y)
return X_mean, singvals_sq, U, UT_y
def _solve_svd_design_matrix(
self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha ** -1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float64],
multi_output=True, y_numeric=True)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
if np.any(self.alphas <= 0):
raise ValueError(
"alphas must be positive. Got {} containing some "
"negative or null value instead.".format(self.alphas))
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == 'eigen':
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == 'svd':
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
sqrt_sw = np.sqrt(sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_values:
self.cv_values_ = np.empty(
(n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(
float(alpha), y, sqrt_sw, X_mean, *decomposition)
if error:
squared_errors = (c / G_inverse_diag) ** 2
if self.alpha_per_target:
alpha_score = -squared_errors.mean(axis=0)
else:
alpha_score = -squared_errors.mean()
if self.store_cv_values:
self.cv_values_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
if self.store_cv_values:
self.cv_values_[:, i] = predictions.ravel()
if self.is_clf:
identity_estimator = _IdentityClassifier(
classes=np.arange(n_y)
)
alpha_score = scorer(identity_estimator,
predictions, y.argmax(axis=1))
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
alpha_score = np.array([
scorer(identity_estimator,
predictions[:, j], y[:, j])
for j in range(n_y)
])
else:
alpha_score = scorer(identity_estimator,
predictions.ravel(), y.ravel())
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, n_alphas
else:
cv_values_shape = n_samples, n_y, n_alphas
self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
return self
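# Hedged illustration (not part of scikit-learn): a numerical check of the
# leave-one-out identity quoted in the _RidgeGCV docstring, looe = c / diag(G^-1),
# for a single alpha on a tiny dense problem with no intercept and unit sample
# weights. The function name and toy data are made up for this sketch.
def _demo_loo_identity(alpha=1.0):
    import numpy as np
    rng = np.random.RandomState(0)
    X, y = rng.randn(6, 3), rng.randn(6)
    G_inv = np.linalg.inv(X.dot(X.T) + alpha * np.eye(6))  # (K + alpha*Id)^-1
    c = G_inv.dot(y)                                       # dual coefficients
    looe_fast = c / np.diag(G_inv)                         # closed-form LOO errors
    # brute force: refit without sample i and measure its prediction error
    looe_slow = np.empty(6)
    for i in range(6):
        mask = np.arange(6) != i
        w = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(3),
                            X[mask].T.dot(y[mask]))
        looe_slow[i] = y[i] - X[i].dot(w)
    assert np.allclose(looe_fast, looe_slow)
    return looe_fast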
class _BaseRidgeCV(LinearModel):
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *,
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None, store_cv_values=False,
alpha_per_target=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
cv = self.cv
if cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True"
" are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True"
" are incompatible")
parameters = {'alpha': self.alphas}
solver = 'sparse_cg' if sparse.issparse(X) else 'auto'
model = RidgeClassifier if is_classifier(self) else Ridge
gs = GridSearchCV(model(fit_intercept=self.fit_intercept,
normalize=self.normalize,
solver=solver),
parameters, cv=cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.best_score_ = gs.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
return self
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
"""Ridge regression with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
    By default, it performs efficient Leave-One-Out Cross-Validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
        If None, the negative mean squared error is used if cv is 'auto' or
        None (i.e. when using leave-one-out cross-validation), and r2 score
        otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
Flag indicating which strategy to use when performing
Leave-One-Out Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
'svd' : force use of singular value decomposition of X when X is
dense, eigenvalue decomposition of X^T.X when X is sparse.
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending on the shape of the training data.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
alpha_per_target : bool, default=False
Flag indicating whether to optimize the alpha value (picked from the
`alphas` parameter list) for each target separately (for multi-output
settings: multiple prediction targets). When set to `True`, after
fitting, the `alpha_` attribute will contain a value for each target.
When set to `False`, a single alpha is used for all targets.
.. versionadded:: 0.24
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_alphas) or \
shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only available if
``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
called, this attribute will contain the mean squared errors
(by default) or the values of the ``{loss,score}_func`` function
(if provided in the constructor).
coef_ : ndarray of shape (n_features) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float or ndarray of shape (n_targets,)
Estimated regularization parameter, or, if ``alpha_per_target=True``,
the estimated regularization parameter for each target.
best_score_ : float or ndarray of shape (n_targets,)
Score of base estimator with best alpha, or, if
``alpha_per_target=True``, a score for each target.
.. versionadded:: 0.23
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> X, y = load_diabetes(return_X_y=True)
>>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.5166...
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
"""
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors (by default) or the values of the
``{loss,score}_func`` function (if provided in the constructor). This
attribute exists only when ``store_cv_values`` is True.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The classes labels.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *, fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None,
store_cv_values=False):
super().__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv, store_cv_values=store_cv_values)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True, y_numeric=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
target = Y if self.cv is None else y
_BaseRidgeCV.fit(self, X, target, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
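# Hedged usage sketch (not taken from the scikit-learn docs): selecting one
# regularization strength per target with RidgeCV. The data and the name
# _demo_ridgecv_alpha_per_target are made up for illustration.
def _demo_ridgecv_alpha_per_target():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    Y = X.dot(rng.randn(5, 2)) + 0.1 * rng.randn(20, 2)
    reg = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0], alpha_per_target=True).fit(X, Y)
    # with alpha_per_target=True, alpha_ and best_score_ hold one entry per target
    return reg.alpha_, reg.best_score_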
| bsd-3-clause |
jensreeder/scikit-bio | skbio/diversity/beta/__init__.py | 1 | 6898 | """
Beta diversity measures (:mod:`skbio.diversity.beta`)
=====================================================
.. currentmodule:: skbio.diversity.beta
This package contains helper functions for working with scipy's pairwise
distance (``pdist``) functions in scikit-bio, and will eventually be expanded
to contain pairwise distance/dissimilarity methods that are not implemented
(or planned to be implemented) in scipy.
The functions in this package currently support applying ``pdist`` functions
to all pairs of samples in a sample by observation count or abundance matrix
and returning an ``skbio.DistanceMatrix`` object. This application is
illustrated below for a few different forms of input.
Functions
---------
.. autosummary::
:toctree: generated/
pw_distances
pw_distances_from_table
Examples
--------
Create a table containing 7 OTUs and 6 samples:
.. plot::
:context:
>>> from skbio.diversity.beta import pw_distances
>>> import numpy as np
>>> data = [[23, 64, 14, 0, 0, 3, 1],
... [0, 3, 35, 42, 0, 12, 1],
... [0, 5, 5, 0, 40, 40, 0],
... [44, 35, 9, 0, 1, 0, 0],
... [0, 2, 8, 0, 35, 45, 1],
... [0, 0, 25, 35, 0, 19, 0]]
>>> ids = list('ABCDEF')
Compute Bray-Curtis distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> bc_dm = pw_distances(data, ids, "braycurtis")
>>> print(bc_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.78787879 0.86666667 0.30927835 0.85714286 0.81521739]
[ 0.78787879 0. 0.78142077 0.86813187 0.75 0.1627907 ]
[ 0.86666667 0.78142077 0. 0.87709497 0.09392265 0.71597633]
[ 0.30927835 0.86813187 0.87709497 0. 0.87777778 0.89285714]
[ 0.85714286 0.75 0.09392265 0.87777778 0. 0.68235294]
[ 0.81521739 0.1627907 0.71597633 0.89285714 0.68235294 0. ]]
Compute Jaccard distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> j_dm = pw_distances(data, ids, "jaccard")
>>> print(j_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.83333333 1. 1. 0.83333333 1. ]
[ 0.83333333 0. 1. 1. 0.83333333 1. ]
[ 1. 1. 0. 1. 1. 1. ]
[ 1. 1. 1. 0. 1. 1. ]
[ 0.83333333 0.83333333 1. 1. 0. 1. ]
[ 1. 1. 1. 1. 1. 0. ]]
Determine if the resulting distance matrices are significantly correlated
by computing the Mantel correlation between them. Then determine if the
p-value is significant based on an alpha of 0.05:
>>> from skbio.stats.distance import mantel
>>> r, p_value, n = mantel(j_dm, bc_dm)
>>> print(r)
-0.209362157621
>>> print(p_value < 0.05)
False
Compute PCoA for both distance matrices, and then find the Procrustes
M-squared value that results from comparing the coordinate matrices.
>>> from skbio.stats.ordination import PCoA
>>> bc_pc = PCoA(bc_dm).scores()
>>> j_pc = PCoA(j_dm).scores()
>>> from skbio.stats.spatial import procrustes
>>> print(procrustes(bc_pc.site, j_pc.site)[2])
0.466134984787
All of this only gets interesting in the context of sample metadata, so
let's define some:
>>> import pandas as pd
>>> try:
... # not necessary for normal use
... pd.set_option('show_dimensions', True)
... except KeyError:
... pass
>>> sample_md = {
... 'A': {'body_site': 'gut', 'subject': 's1'},
... 'B': {'body_site': 'skin', 'subject': 's1'},
... 'C': {'body_site': 'tongue', 'subject': 's1'},
... 'D': {'body_site': 'gut', 'subject': 's2'},
... 'E': {'body_site': 'tongue', 'subject': 's2'},
... 'F': {'body_site': 'skin', 'subject': 's2'}}
>>> sample_md = pd.DataFrame.from_dict(sample_md, orient='index')
>>> sample_md
subject body_site
A s1 gut
B s1 skin
C s1 tongue
D s2 gut
E s2 tongue
F s2 skin
<BLANKLINE>
[6 rows x 2 columns]
Now let's plot our PCoA results, coloring each sample by the subject it
was taken from:
>>> fig = bc_pc.plot(sample_md, 'subject',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by subject', cmap='jet', s=50)
.. plot::
:context:
We don't see any clustering/grouping of samples. If we were to instead color
the samples by the body site they were taken from, we see that the samples
form three separate groups:
>>> import matplotlib.pyplot as plt
>>> plt.close('all') # not necessary for normal use
>>> fig = bc_pc.plot(sample_md, 'body_site',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by body site', cmap='jet', s=50)
Ordination techniques, such as PCoA, are useful for exploratory analysis. The
next step is to quantify the strength of the grouping/clustering that we see in
ordination plots. There are many statistical methods available to accomplish
this; many operate on distance matrices. Let's use ANOSIM to quantify the
strength of the clustering we see in the ordination plots above, using our
Bray-Curtis distance matrix and sample metadata.
First test the grouping of samples by subject:
>>> from skbio.stats.distance import anosim
>>> results = anosim(bc_dm, sample_md, column='subject', permutations=999)
>>> results['test statistic']
-0.4074074074074075
>>> results['p-value'] < 0.1
False
The negative value of ANOSIM's R statistic indicates anti-clustering and the
p-value is insignificant at an alpha of 0.1.
Now let's test the grouping of samples by body site:
>>> results = anosim(bc_dm, sample_md, column='body_site', permutations=999)
>>> results['test statistic']
1.0
>>> results['p-value'] < 0.1
True
The R statistic of 1.0 indicates strong separation of samples based on body
site. The p-value is significant at an alpha of 0.1.
References
----------
.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._base import pw_distances, pw_distances_from_table
__all__ = ["pw_distances", "pw_distances_from_table"]
test = TestRunner(__file__).test
| bsd-3-clause |
formath/mxnet | docs/mxdoc.py | 11 | 12953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
# import StringIO from io for python3 compatibility
from io import StringIO
import contextlib
# white list to evaluate the code block output, such as ['tutorials/gluon']
_EVAL_WHILTELIST = []
# start or end of a code block
_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
    # assumes the repo root sits one level above the Sphinx source dir
    pdf_path = app.builder.srcdir + '/../docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
# scaldoc fails on some apis, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
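# Illustrative sketch (not part of the original plugin): what a small markdown
# table looks like on its way into _convert_md_table_to_rst. Running it needs
# pypandoc plus a pandoc install, and the sample table below is made up.
def _demo_convert_md_table():
    table = ['| op | cost |',
             '|----|------|',
             '| add | 1 |',
             '| mul | 3 |']
    # returns an ```eval_rst block containing a ".. list-table::" directive,
    # or '' if the rows are malformed
    return _convert_md_table_to_rst(table)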
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block langunage
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
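# Illustrative sketch (not part of the original plugin): how _parse_code_lines
# tags each line of a markdown snippet. The sample lines are made up.
def _demo_parse_code_lines():
    lines = ['Intro text',
             '```python',
             'print("hi")',
             '```',
             'More text']
    # yields (line, in_code, lang, indent); both fence lines and the line
    # between them come back with in_code=True and lang='python'
    return list(_parse_code_lines(lines))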
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
````
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
@contextlib.contextmanager
def _string_io():
oldout = sys.stdout
olderr = sys.stderr
    strio = StringIO()
sys.stdout = strio
sys.stderr = strio
yield strio
sys.stdout = oldout
sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, blk_lang, lines in _get_blocks(lines):
if blk_lang != lang:
in_code = False
src = '\n'.join(lines)
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": src
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
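# Illustrative sketch (not part of the original plugin): turning a tiny markdown
# page into a notebook dict. The sample lines are made up.
def _demo_notebook_export():
    lines = ['# Title', '', '```python', 'a = 1 + 1', '```', '']
    ipynb = _get_jupyter_notebook('python', lines)
    # the title becomes a markdown cell and the fenced source a code cell;
    # json.dump(ipynb, f) would write it out as a .ipynb file
    return ipynb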
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
    # _get_blocks yields (in_code, block_lang, lines); code blocks written in a
    # different language are treated as plain text so they get commented out below
    for in_code, blk_lang, lines in _get_blocks(lines):
        if blk_lang != lang:
            in_code = False
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix
if lang == 'python':
ipynb += '.ipynb'
else:
ipynb += '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
f = ipynb.split('/')[-1]
btn += '<div class="download-btn"><a href="%s" download="%s">' \
'<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
local_dict = {}
global_dict = {}
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# # then add lang buttons
# for k,l in enumerate(lines):
# if _LANG_SELECTION_MARK in l:
# lines[k] = _get_lang_selection_btn(langs)
output = ''
for in_code, lang, lines in _get_blocks(lines):
src = '\n'.join(lines)+'\n'
if in_code:
output += _get_mk_code_block(src, lang)
if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
status, blk_out = _get_python_block_output(src, global_dict, local_dict)
if len(blk_out):
output += '<div class=\"cell-results-header\">Output:</div>\n\n'
output += _get_mk_code_block(blk_out, 'results')
else:
output += src
source[i] = output
# source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
    # building the R docs is skipped; it requires a LaTeX install, which is rather heavy
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
| apache-2.0 |
mbonsma/studyGroup | lessons/python/matplotlib/hwk3.1.py | 12 | 2149 | # -*- coding: utf-8 -*-
from numpy import float32
from numpy import linspace
from numpy import polyfit
from numpy import polyval
import matplotlib.pyplot as plt
#Read in data from csv
f=open('data.csv','r')
line=f.readlines()
#Empty array for data
FN=[]
EFN=[]
# Strip the newline from each line, split it on ',' and append the two columns to the FN and EFN arrays
for l in line:
a=l.strip()
x,y=a.split(",")
FN.append(float32(x))
EFN.append(float32(y))
f.close()
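# Note: an equivalent one-liner (assuming the same two-column CSV) would be
# numpy.loadtxt('data.csv', delimiter=',', unpack=True, dtype=float32);
# the explicit loop above just keeps each parsing step visible.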
# Evenly spaced x values, used below to draw the fitted line
z=linspace(-1,4)
#Create grid and plot data
fig = plt.figure(figsize = (4,4), dpi = 600)
a = fig.add_subplot(1,1,1)
plt.plot(FN,EFN,'ks',markersize=3)
#Created a fitted line for the data
fit=polyfit(FN,EFN,1)
plt.plot(z,polyval(fit,z),label=fit,color='k')
#Reset font size
for t in a.yaxis.get_major_ticks():
t.label.set_fontsize(6)
for t in a.xaxis.get_major_ticks():
t.label.set_fontsize(6)
#Set the subplot sizing
fig.subplots_adjust(top=0.95, right =0.89, left=0.13,bottom=0.25)
#Set limits and labels
plt.xlim(-0.2,3.5)
plt.ylim(0,0.8)
plt.ylabel(r'Extrafloral Nectar (mg of sugar per extrafloral nectary)',fontsize=6,verticalalignment='center')
plt.xlabel(r'Floral Nectar (mg of sugar per flower)',fontsize=6,horizontalalignment='center')
#Save as pdf
fig.savefig('EFNvFN.pdf',dpi=600)
plt.show()
"""In ecology, animals and plants interact with one another in an ecosystem.
There are several types of interactions that may occur such as predation,
parasitisim and mutualism. Mutualism is where the animals and plants both give
one another a survival benefit. So if a trait is not useful why invest energy
into producing it?
Different interactions have generally been studied individually even though
they occur in a community. This plot shows the relationship between EFN and FN
production in T. ulmifolia. There is a positive correlation, which suggests that
plants that produce more of one also produce more of the other
This is probably because of overall plant vigour. This was an initial figure
for a later experiment showing interactions."""
| apache-2.0 |
mandli/multilayer-examples | 1d/setplot_shelf.py | 1 | 12827 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
# Plot customization
import matplotlib
# Markers and line widths
matplotlib.rcParams['lines.linewidth'] = 2.0
matplotlib.rcParams['lines.markersize'] = 8
# Font Sizes
matplotlib.rcParams['font.size'] = 16
matplotlib.rcParams['axes.labelsize'] = 15
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
# DPI of output images
matplotlib.rcParams['savefig.dpi'] = 300
# Need to do this after the above
import matplotlib.pyplot as mpl
from clawpack.pyclaw.solution import Solution
from multilayer.aux import bathy_index,kappa_index,wind_index
import multilayer.plot as plot
# matplotlib.rcParams['figure.figsize'] = [6.0,10.0]
def setplot(plotdata,eta=[0.0,-300.0],rho=[1025.0,1045.0],g=9.81,dry_tolerance=1e-3,bathy_ref_lines=[-30e3]):
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
# Fetch bathymetry once
b = Solution(0,path=plotdata.outdir,read_aux=True).state.aux[bathy_index,:]
# ========================================================================
# Plot variable functions
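    # The conserved variables q hold mass per unit area (rho * h) for each layer,
    # so the helpers below recover depths and momenta by dividing by the layer
    # densities, and velocities as ratios of the conserved quantities.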
def bathy(cd):
return b
def kappa(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[kappa_index,:]
def wind(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[wind_index,:]
def h_1(cd):
return cd.q[0,:] / rho[0]
def h_2(cd):
return cd.q[2,:] / rho[1]
def eta_2(cd):
return h_2(cd) + bathy(cd)
def eta_1(cd):
return h_1(cd) + eta_2(cd)
def u_1(cd):
index = np.nonzero(h_1(cd) > dry_tolerance)
u_1 = np.zeros(h_1(cd).shape)
u_1[index] = cd.q[1,index] / cd.q[0,index]
return u_1
def u_2(cd):
index = np.nonzero(h_2(cd) > dry_tolerance)
u_2 = np.zeros(h_2(cd).shape)
u_2[index] = cd.q[3,index] / cd.q[2,index]
return u_2
def hu_1(cd):
index = np.nonzero(h_1(cd) > dry_tolerance)
hu_1 = np.zeros(h_1(cd).shape)
hu_1[index] = cd.q[1,index] / rho[0]
return hu_1
def hu_2(cd):
index = np.nonzero(h_2(cd) > dry_tolerance)
hu_2 = np.zeros(h_2(cd).shape)
hu_2[index] = cd.q[3,index] / rho[1]
return hu_2
# ========================================================================
# Labels
def add_bathy_dashes(current_data):
for ref_line in bathy_ref_lines:
mpl.plot([ref_line,ref_line],[-10,10],'k--')
def add_horizontal_dashes(current_data):
mpl.plot([-400e3,0.0],[0.0,0.0],'k--')
def km_labels(current_data):
r"""Flips xaxis and labels with km"""
mpl.xlabel('km')
locs,labels = mpl.xticks()
labels = np.flipud(locs)/1.e3
mpl.xticks(locs,labels)
def time_labels(current_data):
r"""Convert time to hours"""
        # Not implemented: times are currently left in seconds.
        pass
# ========================================================================
# Limit Settings
xlimits = [-400e3,0.0]
ylimits_depth = [-4000.0,100.0]
xlimits_zoomed = [-30e3-1e3,-30e3+1e3]
ylimits_surface_zoomed = [eta[0] - 0.5,eta[0] + 0.5]
ylimits_internal_zoomed = [eta[1] - 2.5,eta[1] + 2.5]
ylimits_momentum = [-40,10]
# ylimits_velocities = [-1.0,1.0]
ylimits_velocities = [-0.04,0.04]
ylimits_kappa = [0.0,1.2]
# Create data object
plotdata.clearfigures() # clear any old figures,axes,items data
# ========================================================================
# Function for doing depth drawing
# ========================================================================
def fill_items(plotaxes):
# Top layer
plotitem = plotaxes.new_plotitem(plot_type='1d_fill_between')
plotitem.plot_var = eta_1
plotitem.plot_var2 = eta_2
plotitem.color = plot.top_color
plotitem.plotstyle = plot.surface_linestyle
plotitem.show = True
# Bottom Layer
plotitem = plotaxes.new_plotitem(plot_type='1d_fill_between')
plotitem.plot_var = eta_2
plotitem.plot_var2 = bathy
plotitem.color = plot.bottom_color
plotitem.plotstyle = plot.internal_linestyle
plotitem.show = True
# Plot bathy
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = bathy
plotitem.plotstyle = plot.bathy_linestyle
plotitem.show = True
# Plot line in between layers
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = eta_2
plotitem.color = 'k'
plotitem.plotstyle = plot.internal_linestyle
plotitem.show = True
# Plot line on top layer
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = eta_1
plotitem.color = 'k'
plotitem.plotstyle = plot.surface_linestyle
plotitem.show = True
# ========================================================================
# Full Depths
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Full Depths',figno=102)
plotfigure.show = True
def bathy_axes(cd):
km_labels(cd)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Full Depths'
plotaxes.xlimits = xlimits
plotaxes.ylimits = [-4100,100]
plotaxes.afteraxes = bathy_axes
fill_items(plotaxes)
# ========================================================================
# Momentum
# ========================================================================
plotfigure = plotdata.new_plotfigure(name="momentum")
plotfigure.show = True
def momentum_axes(cd):
km_labels(cd)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
mpl.title("Layer Momenta at t = %4.1f s" % cd.t)
mpl.legend(['Top Layer Momentum','Bottom Layer Momentum'],loc=4)
def inset_momentum_axes(cd):
# TODO: This plot does not refresh correctly, skip the inset
fig = mpl.figure(cd.plotfigure.figno)
axes = fig.add_subplot(111)
# Plot main figure
axes.plot(cd.x, hu_1(cd), 'b-')
axes.plot(cd.x, hu_2(cd), 'k--')
axes.set_xlim(xlimits)
axes.set_ylim(ylimits_momentum)
momentum_axes(cd)
# Create inset plot
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
inset_axes = zoomed_inset_axes(axes, 0.5, loc=3)
inset_axes.plot(cd.x, hu_1(cd), 'b-')
inset_axes.plot(cd.x, hu_2(cd), 'k--')
inset_axes.set_xticklabels([])
inset_axes.set_yticklabels([])
x_zoom = [-120e3,-30e3]
y_zoom = [-10,10]
inset_axes.set_xlim(x_zoom)
inset_axes.set_ylim(y_zoom)
mark_inset(axes, inset_axes, loc1=2, loc2=4, fc='none', ec="0.5")
# mpl.ion()
mpl.draw()
# mpl.show()
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Momentum"
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_momentum
# plotaxes.afteraxes = inset_momentum_axes
# Top layer
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = hu_1
plotitem.plotstyle = 'b-'
plotitem.show = True
# Bottom layer
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = hu_2
plotitem.plotstyle = 'k--'
plotitem.show = True
# ========================================================================
# Velocities with Kappa
# ========================================================================
include_kappa = False
if include_kappa:
plotfigure = plotdata.new_plotfigure(name='Velocity and Kappa',figno=14)
else:
plotfigure = plotdata.new_plotfigure(name='Velocities',figno=14)
plotfigure.show = True
# plotfigure.kwargs = {'figsize':(7,6)}
def twin_axes(cd):
fig = mpl.gcf()
fig.clf()
# Get x coordinate values
x = cd.patch.dimensions[0].centers
# Draw velocity and kappa plot
vel_axes = fig.add_subplot(111) # the velocity scale
# kappa_axes = vel_axes.twinx() # the kappa scale
# Bottom layer velocity
bottom_layer = vel_axes.plot(x,u_2(cd),'k-',label="Bottom Layer Velocity")
# Top Layer velocity
top_layer = vel_axes.plot(x,u_1(cd),'b--',label="Top Layer velocity")
if include_kappa:
# Kappa
kappa_line = kappa_axes.plot(x,kappa(cd),'r-.',label="Kappa")
kappa_axes.plot(x,np.ones(x.shape),'r:')
vel_axes.set_xlabel('km')
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
for ref_line in bathy_ref_lines:
vel_axes.plot([ref_line,ref_line],ylimits_velocities,'k:')
if include_kappa:
vel_axes.set_title("Layer Velocities and Kappa at t = %4.1f s" % cd.t)
else:
vel_axes.set_title("Layer Velocities at t = %4.1f s" % cd.t)
vel_axes.set_ylabel('Velocities (m/s)')
vel_axes.set_xlim(xlimits)
vel_axes.set_ylim(ylimits_velocities)
if include_kappa:
plot.add_legend(vel_axes,'Kappa',location=3,color='r',linestyle='-.')
kappa_axes.set_ylabel('Kappa')
kappa_axes.set_ylim(ylimits_kappa)
else:
vel_axes.legend(loc=3)
try:
mpl.subplots_adjust(hspace=0.1)
except:
pass
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = twin_axes
# ========================================================================
# Combined Top and Internal Surface
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Zoomed Depths',figno=13)
plotfigure.show = True
plotfigure.kwargs = {'figsize':(6,6)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(2,1,1)'
plotaxes.title = 'Surfaces'
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_surface_zoomed
def top_afteraxes(cd):
mpl.xlabel('')
locs,labels = mpl.xticks()
# labels = np.flipud(locs)/1.e3
labels = ['' for i in range(len(locs))]
mpl.xticks(locs,labels)
add_bathy_dashes(cd)
mpl.ylabel('m')
mpl.title("Surfaces t = %4.1f s" % cd.t)
plotaxes.afteraxes = top_afteraxes
plotaxes = fill_items(plotaxes)
# Internal surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(2,1,2)'
plotaxes.title = ''
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_internal_zoomed
def internal_surf_afteraxes(cd):
km_labels(cd)
mpl.title('')
mpl.ylabel('m')
mpl.subplots_adjust(hspace=0.05)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
plotaxes.afteraxes = internal_surf_afteraxes
plotaxes = fill_items(plotaxes)
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
# plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_framenos = [0,30,100,200,300]
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
| mit |
camptocamp/QGIS | python/plugins/processing/algs/VectorLayerHistogram.py | 1 | 2809 | # -*- coding: utf-8 -*-
"""
***************************************************************************
    VectorLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import *
from qgis.core import *
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterTableField import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
from processing.tools import dataobjects
from processing.parameters.ParameterNumber import ParameterNumber
class VectorLayerHistogram(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
FIELD = "FIELD"
BINS = "BINS"
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(uri)
fieldname = self.getParameterValue(self.FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, fieldname)
plt.close()
bins = self.getParameterValue(self.BINS)
plt.hist(values[fieldname], bins)
plotFilename = output +".png"
lab.savefig(plotFilename)
f = open(output, "w")
f.write("<img src=\"" + plotFilename + "\"/>")
f.close()
def defineCharacteristics(self):
self.name = "Vector layer histogram"
self.group = "Graphics"
self.addParameter(ParameterVector(self.INPUT, "Input layer", [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.FIELD, "Attribute", self.INPUT,ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterNumber(self.BINS, "number of bins", 2, None, 10))
self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
scholer/py2cytoscape | py2cytoscape/data/network_view.py | 1 | 6734 | # -*- coding: utf-8 -*-
import json
import pandas as pd
import requests
from py2cytoscape.data.edge_view import EdgeView
from py2cytoscape.data.node_view import NodeView
from . import BASE_URL, HEADERS
from py2cytoscape.data.util_network import NetworkUtil
BASE_URL_NETWORK = BASE_URL + 'networks'
class CyNetworkView(object):
def __init__(self, network=None, suid=None):
if network is None:
raise ValueError('Network is required.')
# Validate required argument
if pd.isnull(suid):
raise ValueError("View SUID is missing.")
else:
self.__network = network
self.__id = suid
self.__url = BASE_URL_NETWORK + '/' \
+ str(self.__network.get_id()) + '/views/' + str(self.__id)
def get_id(self):
"""
Get session-unique ID of this network view
:return: SUID as integer
"""
return self.__id
def get_model_id(self):
"""
Get network model SUID
:return: model SUID as integer
"""
return self.__network.get_id()
def get_node_views(self):
return self.__get_views('nodes')
def get_edge_views(self):
return self.__get_views('edges')
def get_node_views_as_dict(self):
return self.__get_views('nodes', format='dict')
def get_edge_views_as_dict(self):
return self.__get_views('edges', format='dict')
def get_network_view_as_dict(self):
return self.__get_views('network', format='dict')
def get_node_views_as_dataframe(self):
return self.__get_views('nodes', format='df')
def get_edge_views_as_dataframe(self):
return self.__get_views('edges', format='df')
def __get_views(self, obj_type=None, format='view'):
url = self.__url + '/' + obj_type
views = requests.get(url).json()
        if format == 'dict':
            if obj_type == 'network':
                return self.__get_network_view_dict(views)
            else:
                return self.__get_view_dict(views)
        elif format == 'df':
            # Used by the *_as_dataframe() getters above.
            return self.__get_view_df(views)
        elif format == 'view':
            return self.__get_view_objects(views, obj_type)
        else:
            raise ValueError('Format not supported: ' + format)
def __get_view_objects(self, views, obj_type):
view_list = []
        if obj_type == 'nodes':
            for view in views:
                view = NodeView(self, view['SUID'], obj_type)
                view_list.append(view)
        elif obj_type == 'edges':
for view in views:
view = EdgeView(self, view['SUID'], obj_type)
view_list.append(view)
else:
raise ValueError('No such object type: ' + obj_type)
return view_list
def __get_view_dict(self, views):
# reformat return value to simple dict
view_dict = {}
for view in views:
key = view['SUID']
values = view['view']
# Flatten the JSON
key_val_pair = {}
for entry in values:
vp = entry['visualProperty']
value = entry['value']
key_val_pair[vp] = value
view_dict[key] = key_val_pair
return view_dict
    def __get_view_df(self, views):
        # Reformat return value into a DataFrame indexed by view SUID,
        # with one column per visual property.
        view_dict = {}
        for view in views:
            key = view['SUID']
            values = view['view']
            # Flatten the JSON
            key_val_pair = {}
            for entry in values:
                vp = entry['visualProperty']
                value = entry['value']
                key_val_pair[vp] = value
            view_dict[key] = key_val_pair
        return pd.DataFrame.from_dict(view_dict, orient='index')
def __get_network_view_dict(self, values):
# reformat return value to simple dict
view_dict = {}
# Flatten the JSON
for entry in values:
vp = entry['visualProperty']
value = entry['value']
view_dict[vp] = value
return view_dict
def update_node_views(self, visual_property=None, values=None, key_type='suid'):
self.__update_views(visual_property, values, 'nodes', key_type)
def batch_update_node_views(self, value_dataframe=None):
self.__batch_update(value_dataframe, 'nodes')
def batch_update_edge_views(self, value_dataframe=None):
self.__batch_update(value_dataframe, 'edges')
def update_edge_views(self, visual_property=None, values=None, key_type='suid'):
self.__update_views(visual_property, values, 'edges', key_type)
def update_network_view(self, visual_property=None, value=None):
"""
Updates single value for Network-related VP.
:param visual_property:
:param value:
:return:
"""
new_value = [
{
"visualProperty": visual_property,
"value": value
}
]
requests.put(self.__url + '/network', data=json.dumps(new_value),
headers=HEADERS)
def __update_views(self, visual_property, values,
object_type=None, key_type='suid'):
        if key_type == 'name':
name2suid = NetworkUtil.name2suid(self.__network)
body = []
for key in values.keys():
            if key_type == 'name':
suid = name2suid[key]
if suid is None:
continue
else:
suid = key
new_value = self.__create_new_value(suid, visual_property,
values[key])
body.append(new_value)
requests.put(self.__url + '/' + object_type, data=json.dumps(body), headers=HEADERS)
def __create_new_value(self, suid, visual_property, value):
return {
"SUID": suid,
"view": [
{
"visualProperty": visual_property,
"value": value
}
]
}
def __batch_update(self, df, object_type=None):
body = []
columns = df.columns
for index, row in df.iterrows():
entry = {
'SUID': int(index),
'view': self.__create_new_values_from_row(columns, row)
}
body.append(entry)
requests.put(self.__url + '/' + object_type, data=json.dumps(body), headers=HEADERS)
def __create_new_values_from_row(self, columns, row):
views = []
for column in columns:
view = {
"visualProperty": column,
"value": row[column]
}
views.append(view)
return views
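# Hedged usage sketch (assumes a py2cytoscape CyNetwork wrapper `net` and an existing
# view SUID `view_suid`; the visual property names are standard Cytoscape VPs but are
# not defined in this module):
#
#   view = CyNetworkView(network=net, suid=view_suid)
#   view.update_network_view('NETWORK_BACKGROUND_PAINT', '#FFFFFF')
#   view.update_node_views('NODE_FILL_COLOR', {'nodeA': '#FF0000'}, key_type='name')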
| mit |
pompiduskus/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
    # This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H=nx.connected_component_subgraphs(G)[0]
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| mit |
ggirelli/gpseq-img-py | pygpseq/anim/series.py | 1 | 12252 | # -*- coding: utf-8 -*-
'''
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
@description: contains Series wrapper, which in turn contains Nucleus.
'''
# DEPENDENCIES =================================================================
import math
import os
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
from pygpseq import const
from pygpseq.tools.binarize import Binarize
from pygpseq.tools import io as iot
from pygpseq.tools import image as imt
from pygpseq.tools import plot
from pygpseq.tools import stat as stt
from pygpseq.tools import string as st
from pygpseq.tools import vector as vt
from pygpseq.anim.nucleus import Nucleus
# CLASSES ======================================================================
class Series(iot.IOinterface):
"""Series (Field of View, i.e., two-channel image) wrapper.
Attributes:
__version__ (string): package version.
n (int): series id (1-indexed).
name (string): series name.
nuclei (list[pygpseq.wraps.Nuclei]): nuclei list.
basedir (string): series folder path.
dna_bg (float): estimated dna channel background.
sig_bg (float): estimated signal channel background.
        filist (list): series file info.
"""
__version__ = const.VERSION
n = 0
name = ''
nuclei = []
basedir = '.'
dna_bg = None
sig_bg = None
filist = []
def __init__(self, ds, condition = None, **kwargs):
"""Run IOinterface __init__ method.
Args:
ds (dict): series information list.
condition (pyGPSeq.wraps.Condition): condition wrapper (opt).
"""
# If required, inherit from `condition` wrap
if None != condition:
logpath = condition.logpath
super(Series, self).__init__(path = logpath, append = True)
self.basedir = condition.path
else:
super(Series, self).__init__()
# Save input parameters
self.name = ds[0]
self.filist = ds[1]
self.n = ds[2]
def __getitem__(self, key):
""" Allow get item. """
if key in dir(self):
return(getattr(self, key))
else:
return(None)
def __setitem__(self, key, value):
""" Allow set item. """
if key in dir(self):
self.__setattr__(key, value)
def adjust_options(self, read_only_dna = None, log = None, **kwargs):
"""Adjust options to be passed to the Nucleus class.
Args:
dna_names (tuple[string]): dna channel names.
sig_names (tuple[string]): signal channel names.
an_type (pyGPSeq.const): analysis type.
Returns:
dict: adds the following kwargs:
series_name (string): series wrap name.
basedir (string): series wrap base directory.
dna_ch (numpy.array): image (dimensionality based on an_type).
sig_ch (numpy.array): image (dimensionality based on an_type).
"""
# Start log
if None == log: log = ''
# Only work on dna channel
if None == read_only_dna:
read_only_dna = False
# Add necessary options
kwargs['series_name'] = self.name
kwargs['basedir'] = self.basedir
# Read DNA channel
kwargs['dna_ch'], log = self.get_channel(kwargs['dna_names'],
log, **kwargs)
if not read_only_dna:
kwargs['sig_ch'], log = self.get_channel(kwargs['sig_names'],
log, **kwargs)
# Output
return((kwargs, log))
def export_nuclei(self, **kwargs):
"""Export current series nuclei. """
# Set output suffix
if not 'suffix' in kwargs.keys():
suffix = ''
else:
suffix = st.add_leading_dot(kwargs['suffix'])
# Add necessary options
self.printout('Current series: "' + self.name + '"...', 1)
kwargs, log = self.adjust_options(**kwargs)
# Export nuclei
[n.export(**kwargs) for n in self.nuclei]
# Produce log
log = np.zeros(len(self.nuclei), dtype = const.DTYPE_NUCLEAR_SUMMARY)
        for i, l in enumerate([n.get_summary(**kwargs) for n in self.nuclei]):
            # Append nuclear data to the series log
            summary = [self.n]
            summary.extend(l)
            log[i, :] = summary
# Export series log
np.savetxt(kwargs['out_dir'] + self.name + '.summary' + suffix + '.csv',
log, delimiter = ',', comments = '',
header = ",".join([h for h in log.dtype.names]))
return(log)
def find_channel(self, channel_names):
"""Return the first channel to correspond to channel_names. """
# Fix the param type
if type(str()) == type(channel_names):
channel_names = [channel_names]
# Cycle through the available channels
for cname in channel_names:
# Identify the requested channel
idx = self.find_channel_id(cname)
# Return the channel
if -1 != idx:
return([i for i in self.filist.items()][idx])
# Return empty dictionary if no matching channel is found
return({})
def find_channel_id(self, channel_name):
"""Return the id of the channel file with the specified name. """
# Retrieve available channel names
names = self.get_channel_names()
if 0 != names.count(channel_name):
# Return matching channel id
return(names.index(channel_name))
else:
# Return -1 if no matching channel is found
return(-1)
def find_nuclei(self, **kwargs):
"""Segment current series.
Args:
**kwargs
dna_names (tuple[string]): dna channel names.
cond_name (string): condition wrapper name.
seg_type (pyGPSeq.const): segmentation type.
rm_z_tips (bool): remove nuclei touching the tips of the stack.
radius_interval (tuple[float]): allowed nuclear radius interval.
offset (tuple[int]): dimensions box/square offset.
aspect (tuple[float]): pixel/voxel dimension proportion.
Returns:
tuple: series current instance and log string.
"""
# Set output suffix
if not 'suffix' in kwargs.keys():
suffix = ''
else:
suffix = st.add_leading_dot(kwargs['suffix'])
# Check plotting
if not 'plotting' in kwargs.keys():
kwargs['plotting'] = True
log = ""
log += self.printout('Current series: "' + self.name + '"...', 1)
# Read images
kwargs, alog = self.adjust_options(read_only_dna = False, **kwargs)
log += alog
# Extract from kwargs
seg_type = kwargs['seg_type']
dna_ch = kwargs['dna_ch']
sig_ch = kwargs['sig_ch']
# Make new channel copy
i = dna_ch.copy()
# Produce a mask
bi = Binarize(path = kwargs['logpath'], append = True, **kwargs)
bi.verbose = self.verbose
mask, thr, tmp_log = bi.run(i)
log += tmp_log
# Estimate background
if None == self.dna_bg:
self.dna_bg = imt.estimate_background(dna_ch, mask, seg_type)
kwargs['dna_bg'] = self.dna_bg
if None == self.sig_bg:
self.sig_bg = imt.estimate_background(sig_ch, mask, seg_type)
kwargs['sig_bg'] = self.sig_bg
log += self.printout('Estimating background:', 2)
log += self.printout('DNA channel: ' + str(kwargs['dna_bg']), 3)
log += self.printout('Signal channel: ' + str(kwargs['sig_bg']), 3)
# Filter object size
mask, tmp_log = bi.filter_obj_XY_size(mask)
log += tmp_log
mask, tmp_log = bi.filter_obj_Z_size(mask)
log += tmp_log
# Save mask
log += self.printout('Saving series object mask...', 2)
L = label(mask)
# Plot
fig = plt.figure()
if 3 == len(mask.shape):
plt.imshow(L.max(0).astype('u4'))
else:
plt.imshow(L.astype('u4'))
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
plot.set_font_size(kwargs['font_size'])
title = 'Nuclei in "' + kwargs['cond_name'] + '", ' + str(self.name)
title += ' [' + str(L.max()) + ' objects]'
plt.title(title)
# Export as png
fname = kwargs['out_dir'] + const.OUTDIR_MASK + kwargs['cond_name']
fname += '.' + self.name + '.mask' + suffix + '.png'
if kwargs['plotting']: plot.export(fname, 'png')
# Close plot figure
plt.close(fig)
# Initialize nuclei
log += self.printout('Bounding ' + str(L.max()) + ' nuclei...', 2)
kwargs['logpath'] = self.logpath
kwargs['i'] = i
kwargs['thr'] = thr
kwargs['series_id'] = self.n
seq = range(1, L.max() + 1)
self.nuclei = [Nucleus(n = n, mask = L == n, **kwargs) for n in seq]
return((self, log))
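    # Hedged usage sketch (assumes `ds` is the (name, file_info, id) tuple described
    # in __init__, `cond` is a pygpseq Condition wrapper, and the channel names and
    # remaining keyword arguments are placeholders, not values from this module):
    #
    #   s = Series(ds, condition=cond)
    #   s, log = s.find_nuclei(dna_names=('dapi',), sig_names=('cy5',),
    #                          cond_name=cond.name, seg_type=seg_type,
    #                          **other_settings)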
def get_c(self):
"""Return number of channels in the series. """
return(len(self.filist))
def get_channel(self, ch_name, log = None, **kwargs):
"""Read the series specified channel.
Args:
ch_name (string): channel name.
log (string): log string.
**kwargs
Returns:
tuple: channel image and log string.
"""
# Start log (used when verbosity is off)
if None == log: log = ""
log += self.printout('Reading channel "' + str(ch_name) + '"...', 2)
# Read channel
f = self.find_channel(ch_name)
imch = imt.read_tiff(os.path.join(self.basedir, f[0]))
imch = imt.slice_k_d_img(imch, 3)
# Deconvolved images correction
if 'rescale_deconvolved' in kwargs.keys():
if kwargs['rescale_deconvolved']:
# Get DNA scaling factor and rescale
sf = imt.get_rescaling_factor(f, **kwargs)
imch = (imch / sf).astype('float')
msg = 'Rescaling "' + f[0] + '" [' + str(sf) + ']...'
log += self.printout(msg, 3)
# Make Z-projection
if kwargs['an_type'] in [const.AN_SUM_PROJ, const.AN_MAX_PROJ]:
msg = 'Generating Z-projection [' + str(kwargs['an_type']) + ']...'
log += self.printout(msg, 3)
if 2 != len(imch.shape):
imch = imt.mk_z_projection(imch, kwargs['an_type'])
# Prepare output
return((imch, log))
def get_channel_names(self, channel_field = None):
"""Return the names of the channels in the series. """
if None == channel_field:
channel_field = const.REG_CHANNEL_NAME
return([c[channel_field] for c in self.filist.values()])
def get_nuclei_data(self, nuclei_ids, **kwargs):
"""Retrieve a single nucleus from the current series. """
# Read channel images
kwargs, log = self.adjust_options(**kwargs)
# Re-build mask
bi = Binarize(path = self.logpath, append = True, **kwargs)
bi.verbose = self.verbose
mask, thr, tmp_log = bi.run(kwargs['dna_ch'].copy())
log += tmp_log
# Empty nuclear data array
data = []
for nucleus_id in nuclei_ids:
# Select nucleus
n = self.nuclei[nucleus_id -1]
# Setup nucleus instance verbosity
if not self.verbose:
n.verbose = False
# Retrieve nuclear data
ndata, nlog = n.get_data(mask = mask, **kwargs)
# Update log and save nuclear data
log += nlog
data.append(ndata)
return((data, log))
def propagate_attr(self, key):
"""Propagate attribute current value to every nucleus. """
for i in range(len(self.nuclei)):
self.nuclei[i][key] = self[key]
# END ==========================================================================
################################################################################
| mit |
raincoatrun/ThinkStats2 | code/thinkplot.py | 75 | 18140 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in _Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
if cls.color_iter is None:
cls.InitializeIter(7)
return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
_Brewer.InitializeIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (14, 6),
(1, 3): (14, 6),
(2, 2): (10, 10),
(2, 3): (16, 10),
(3, 1): (8, 10),
}
if (rows, cols) in size_map:
fig = pyplot.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
pyplot.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
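# Hedged usage sketch of the color-hinting workflow (all names below are defined in
# this module; xs/ys1..ys3 are placeholder sequences):
#
#   PrePlot(num=3)                  # hint: three lines -> three well-spaced blues
#   Plot(xs, ys1)
#   Plot(xs, ys2)
#   Plot(xs, ys3)
#   Show(xlabel='x', ylabel='y')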
def SubPlot(plot_number, rows=None, cols=None):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
pyplot.subplot(rows, cols, plot_number)
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
global LOC
LOC = None
_Brewer.ClearIter()
pyplot.clf()
fig = pyplot.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
_Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def _UnderrideColor(options):
if 'color' in options:
return options
color_iter = _Brewer.GetIter()
if color_iter:
try:
options['color'] = next(color_iter)
except StopIteration:
# TODO: reconsider whether this should warn
# warnings.warn('Warning: Brewer ran out of colors.')
_Brewer.ClearIter()
return options
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.8, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Plots a line.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
warnings.warn("Hist: Can't compute bar width automatically."
"Check for non-numeric types in Hist."
"Or try providing width option."
)
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
warnings.warn("Pmf: Can't compute bar width automatically."
"Check for non-numeric types in Pmf."
"Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
        xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
options = _Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
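# Hedged usage sketch of the scale handshake above (`cdf` is a placeholder Cdf object):
#
#   scale = Cdf(cdf, transform='pareto')   # e.g. dict(xscale='log', yscale='log')
#   Show(**scale)                          # hands the scale options on to Config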
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
# looks like this is not necessary: matplotlib understands text loc specs
loc_dict = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
global LEGEND
LEGEND = options.get('legend', LEGEND)
if LEGEND:
global LOC
LOC = options.get('loc', LOC)
pyplot.legend(loc=LOC)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(pyplot.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt)
if clf:
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functons with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = _Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| gpl-3.0 |
ltiao/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 30 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
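    # For reference, the per-sample update mirrored below (squared loss, L2 penalty,
    # constant learning rate eta) is roughly:
    #
    #   g_i = (w . x_i + b) - y_i
    #   w  <- (1 - eta * alpha) * w - eta * g_i * x_i
    #   b  <- b - eta * g_i * decay
    #
    # with the average_* quantities kept as running means of w and b over samples.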
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported"""
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another model with balanced class_weight to confirm the result
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the class 1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
nelson-liu/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
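# The docstring above describes the generative process behind the simulated
# multi-label documents. The helper below is only a rough, illustrative sketch
# of that process added for clarity; the vocabulary size, the Dirichlet draw
# for the per-class word distributions and the simple rejection loops are
# assumptions made here, and this is not the scikit-learn implementation
# (make_multilabel_classification is still used below). It is never called by
# the rest of the script.
def _sketch_document(n_classes=2, n_labels=1, length=50, n_words=20, seed=0):
    rng = np.random.RandomState(seed)
    theta = np.ones(n_classes) / n_classes                 # class prior
    theta_c = rng.dirichlet(np.ones(n_words), n_classes)   # per-class word dists
    # pick the number of labels: n ~ Poisson(n_labels), rejected until valid
    n = 0
    while n < 1 or n > n_classes:
        n = rng.poisson(n_labels)
    # n times, choose a class c ~ Multinomial(theta), rejecting repeats
    classes = set()
    while len(classes) < n:
        classes.add(rng.choice(n_classes, p=theta))
    # pick the document length: k ~ Poisson(length), rejected if zero
    k = 0
    while k == 0:
        k = rng.poisson(length)
    # k times, choose a word w ~ Multinomial(theta_c), mixing the chosen classes
    word_p = np.mean([theta_c[c] for c in classes], axis=0)
    word_counts = rng.multinomial(k, word_p)
    return word_counts, sorted(classes)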
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
OpenDrift/opendrift | tests/models/test_readers.py | 1 | 29038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import unittest
from datetime import datetime, timedelta
import numpy as np
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.leeway import Leeway
from opendrift.models.openoil import OpenOil
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
from opendrift.readers import reader_global_landmask
from opendrift.readers import reader_constant
from opendrift.readers import reader_lazy
from opendrift.readers import reader_from_url
from opendrift.models.pelagicegg import PelagicEggDrift
from opendrift.readers import reader_current_from_track
o = OceanDrift(loglevel=20)
reader_list = [
'www.nonexistingurl.com',
o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc',
'/nonexistingdisk/nonexistingfile.ext',
o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc']
class TestReaders(unittest.TestCase):
"""Tests for readers"""
def test_adding_readers(self):
o = OceanDrift()
landmask = reader_global_landmask.Reader(
extent=[-1.5, 7, 59, 64])
r = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
o.add_reader([r, landmask])
self.assertEqual(o.priority_list['land_binary_mask'],
['roms native', 'global_landmask'])
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
# Switch order
o = OceanDrift()
o.add_reader([landmask, r])
self.assertEqual(o.priority_list['land_binary_mask'],
['global_landmask', 'roms native'])
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
# Test add_readers_from_list
o = OceanDrift()
o.add_readers_from_list(reader_list, lazy=False)
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
self.assertEqual(o.priority_list['x_wind'],
[o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc'])
def test_repeated_run(self):
# NOTE: this test fails if outfile is not None
#outfile = 'leeway_test.nc'
outfile = None
o = OceanDrift(loglevel=50)
o.set_config('drift:vertical_mixing', False)
o.add_readers_from_list(reader_list)
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5, outfile=outfile)
lon1 = o.get_property('lon')[0]
# Repeated run with same object
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5, outfile=outfile)
lon2 = o.get_property('lon')[0]
# Third run, with different config
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12),
wind_drift_factor=.1)
o.run(steps=5)
lon3 = o.get_property('lon')[0]
# Fourth run, with different time
o.reset() # Reset is needed due to new start_time
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 13),
wind_drift_factor=.1)
o.run(steps=5, outfile=outfile)
lon4 = o.get_property('lon')[0]
# Check results
self.assertEqual(lon1[-1][0], lon2[-1][0])
self.assertNotEqual(lon3[-1][0], lon2[-1][0])
#os.remove(outfile)
def test_reader_from_url(self):
readers = reader_from_url(reader_list)
self.assertIsNone(readers[0])
self.assertTrue(isinstance(readers[1],
reader_ROMS_native.Reader))
self.assertIsNone(readers[2])
self.assertTrue(isinstance(readers[3],
reader_netCDF_CF_generic.Reader))
def test_lazy_reader(self):
o = OceanDrift(loglevel=20)
lr = reader_lazy.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertFalse(lr.initialised)
self.assertEqual(len(lr.covers_positions([15], [69])[0]), 1)
self.assertEqual(len(lr.covers_positions([0], [0])[0]), 0)
self.assertTrue(lr.initialised)
# Make a corresponding, unlazy reader
rr = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertEqual(len(rr.covers_positions([15], [69])[0]), 1)
self.assertEqual(len(rr.covers_positions([0], [0])[0]), 0)
# Check that both readers provide the same attributes
for att in rr.__dict__:
self.assertEqual(type(lr.__getattr__(att)),
type(getattr(rr, att)))
if type(getattr(rr, att)) in [float, int, dict, str, list,
datetime, timedelta, bool,
np.float64]:
self.assertEqual(lr.__getattr__(att),
getattr(rr, att))
elif type(getattr(rr, att)) in [np.ndarray]:
self.assertIsNone(np.testing.assert_array_equal(
lr.__getattr__(att),
getattr(rr, att)))
else:
print('Skipping: ' + att + ' ' +
str(type(getattr(rr, att))))
def test_lazy_reader_oildrift(self):
o = OpenOil(loglevel=0)
reader_constant_wind = \
reader_constant.Reader({'x_wind':5, 'y_wind': 6,
'sea_ice_area_fraction': 0})
# Added ice area to prevent problems with masking
# with older versions of netCDF library
o.add_reader(reader_constant_wind)
o.add_readers_from_list(reader_list, lazy=True)
self.assertEqual(len(o._lazy_readers()), 4)
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5)
print(o) # Debug, this fails for old libraries
self.assertEqual(len(o._lazy_readers()), 2)
self.assertEqual(len(o.discarded_readers), 1)
def test_ROMS_native_stranding(self):
o = OceanDrift(loglevel=0)
r = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
o.add_reader(r)
o.set_config('general:use_auto_landmask', False)
o.set_config('drift:vertical_mixing', False)
o.set_config('environment:fallback:x_wind', 0)
o.set_config('environment:fallback:y_wind', 10)
o.seed_elements(lon=15.2, lat=68.3, time=r.start_time,
wind_drift_factor=.02,
number=10, radius=1000)
o.run(steps=8)
self.assertEqual(o.num_elements_deactivated(), 2)
#def test_lazy_readers_and_corrupt_data(self):
# o = OceanDrift(loglevel=0)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc'])
# reader_constant_current_corrupt = \
# reader_constant.Reader({'x_sea_water_velocity': np.nan,
# 'y_sea_water_velocity': np.nan})
# o.add_reader(reader_constant_current_corrupt)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc'])
# print o
# o.seed_elements(lon=14.5, lat=68, time=datetime(2016,2,4))
# o.set_config('environment:fallback:x_wind', 0)
# o.set_config('environment:fallback:y_wind', 0)
# o.set_config('environment:fallback:x_sea_water_velocity', None)
# o.set_config('environment:fallback:y_sea_water_velocity', None)
# o.set_config('environment:fallback:land_binary_mask', 0)
# print o
# o.run(steps=1)
#def test_oildrift_backwards(self):
# o = OpenOil(loglevel=20)
# reader_constant_wind = \
# reader_constant.Reader({'x_wind':5, 'y_wind': 6})
# o.add_reader(reader_constant_wind)
# o.add_readers_from_list(reader_list, lazy=True)
# self.assertEqual(len(o._lazy_readers()), 4)
# o.seed_elements(lon=14, lat=67.85,
# time=datetime(2016, 2, 2, 12))
# o.set_config()
# o.run(steps=5)
# self.assertEqual(len(o._lazy_readers()), 2)
# self.assertEqual(len(o.discarded_readers), 1)
#def test_lazy_reader_oildrift_real(self):
# o = OpenOil(loglevel=0)
# o.add_readers_from_file(o.test_data_folder() +
# '../../opendrift/scripts/data_sources.txt')
# o.seed_elements(lon=4, lat=60.0,
# time=datetime(2018, 7, 2, 12))
# o.run(steps=5)
# print o
def test_lazy_reader_leeway_compare(self):
o1 = Leeway(loglevel=0)
#o1.set_config('environment:fallback:land_binary_mask', 0)
o1.required_variables = [r for r in o1.required_variables
if r != 'land_binary_mask']
o1.add_readers_from_list(reader_list, lazy=False)
time = o1.readers['roms native'].start_time
o1.seed_elements(lat=67.85, lon=14, time=time)
o1.run(steps=5)
o2 = Leeway(loglevel=20)
#o2.set_config('environment:fallback:land_binary_mask', 0)
o2.required_variables = [r for r in o1.required_variables
if r != 'land_binary_mask']
o2.add_readers_from_list(reader_list, lazy=True)
o2.seed_elements(lat=67.85, lon=14, time=time)
o2.run(steps=5)
# Some differences in wind and current components
# due to different coordinate system
for var in o1.history.dtype.names:
if var in ['x_wind', 'y_wind', 'x_sea_water_velocity',
'y_sea_water_velocity']:
tolerance = 1
else:
tolerance = 5
self.assertIsNone(np.testing.assert_array_almost_equal(
o1.history[var], o2.history[var], tolerance))
def test_constant_and_lazy_reader_leeway(self):
cw = reader_constant.Reader({'x_wind':5, 'y_wind': 6})
cc = reader_constant.Reader({'x_sea_water_velocity':0,
'y_sea_water_velocity': .2})
o = Leeway(loglevel=20)
o.add_reader([cw, cc])
o.add_readers_from_list(reader_list)
o.set_config('environment:fallback:x_sea_water_velocity', 0.0)
o.set_config('environment:fallback:y_sea_water_velocity', 0.1)
time = datetime(2016,2,2,12)
o.seed_elements(lat=67.85, lon=14, time=time)
o.run(steps=2)
self.assertAlmostEqual(o.elements.lat[0], 67.8548, 3)
def test_automatic_landmask(self):
o = OceanDrift(loglevel=20)
self.assertRaises(ValueError, o.run)
o.seed_elements(lon=4, lat=60, time=datetime(2016,9,1))
o.run(steps=2)
def test_reader_coverage(self):
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
# Element outside reader domain
self.assertEqual(len(r.covers_positions(5, 80)[0]), 0)
x, y = r.lonlat2xy(5, 80)
self.assertRaises(ValueError, r.check_arguments,
'y_sea_water_velocity', r.start_time, x, y, 0)
# Element inside reader domain
self.assertEqual(len(r.covers_positions(5, 60)[0]), 1)
x, y = r.lonlat2xy(5, 60)
var, time, x2, y2, z2, outside = \
r.check_arguments('y_sea_water_velocity', r.start_time, x, y, 0)
self.assertEqual(var, ['y_sea_water_velocity'])
self.assertEqual(time, r.start_time)
self.assertEqual(x, x2)
self.assertEqual(y, y2)
self.assertEqual(0, z2)
self.assertEqual(len(outside), 0)
def test_outside_reader_time_coverage(self):
o = PelagicEggDrift()
reader = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
o.add_reader(reader)
o.set_config('environment:fallback:x_sea_water_velocity', 1)
o.set_config('environment:fallback:land_binary_mask', 0)
o.set_config('drift:vertical_mixing', False)
o.seed_elements(lon=4.8, lat=60, number=1, time=reader.end_time)
o.run(steps=2)
# Check that fallback value is used when outside time coverage
self.assertEqual(o.history['x_sea_water_velocity'][0][-1], 1.0)
def test_reader_netcdf(self):
"""Check reader functionality."""
reader1 = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
reader2 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
readers = [reader1, reader2]
for r in readers:
print(r)
# Make four points:
# 1) outside lower left, 2) lower left, 3) center of domain
# 4) outside upper right
# and assure that only 2) and 3) are marked as covered
# Upper right is skipped, as lonlat2xy may lie slightly outside
x = np.array([r.xmin - r.delta_x, r.xmin, (r.xmin + r.xmax)/2,
r.xmax + r.delta_x])
y = np.array([r.ymin - r.delta_y, r.ymin, (r.ymin + r.ymax)/2,
r.ymax + r.delta_y])
lons, lats = r.xy2lonlat(x, y)
covered = r.covers_positions(lons, lats, 0)[0]
if len(covered) != 1:
self.assertEqual(covered.tolist(), [1, 2])
else:
if covered == [2]:
print('#'*60)
print('#'*60)
print('WARNING: A point on the boundary is considered ' \
'outside after conversion x,y -> lon,lat -> x,y. ' \
'This is different from "standard", but is due to ' \
'rounding differences and not considered to be an ' \
'error. Numpy version is %s' % (np.__version__))
print('#'*60)
print('#'*60)
else:
self.assertTrue(False) # Should never happen!
self.assertTrue(r.covers_time(r.start_time))
self.assertFalse(r.covers_time(r.start_time - r.time_step))
self.assertFalse(r.proj.crs.is_geographic)
def test_vertical_profiles(self):
norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
lon = np.array([4.73])
lat = np.array([62.35])
variables = ['x_sea_water_velocity', 'x_sea_water_velocity',
'sea_water_temperature']
x,y = norkyst3d.lonlat2xy(lon, lat)
data = norkyst3d.get_variables(variables,
time=norkyst3d.start_time,
x=x, y=y, z=[0, -100])
self.assertEqual(data['z'][4], -25)
self.assertEqual(data['z'][4], -25)
self.assertAlmostEqual(data['sea_water_temperature'][:,0,0][7],
9.220000267028809)
def test_vertical_interpolation(self):
norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
lon = np.array([4.73, 4.75])
lat = np.array([62.35, 62.30])
z = np.array([0, -33])
variables = ['x_sea_water_velocity', 'x_sea_water_velocity',
'sea_water_temperature']
# Call get_variables_interpolated which interpolates both in
# space (horizontally, vertically) and then in time
data, profiles = norkyst3d.get_variables_interpolated(
variables, profiles=['sea_water_temperature'],
profiles_depth = [-100, 0],
time = norkyst3d.start_time + timedelta(seconds=900),
lon=lon, lat=lat, z=z)
# Check surface value
self.assertEqual(data['sea_water_temperature'][0],
profiles['sea_water_temperature'][0,0])
# Check interpolated temperature at 33 m depth
self.assertAlmostEqual(data['sea_water_temperature'][1],
8.32, 2)
#import matplotlib.pyplot as plt
#plt.plot(profiles['sea_water_temperature'][:,0])
#plt.plot(profiles['sea_water_temperature'][:,1], 'r')
#plt.show()
def test_vertical_interpolation_sigma(self):
nordic3d = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
lon = np.array([12.46, 12.46, 12.46])
lat = np.array([68.21, 69.31, 69.31])
z = np.array([-33, 0, -2500])
x, y = nordic3d.lonlat2xy(lon, lat)
variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature']
# Call get_variables, which returns the reader data without interpolation
data = nordic3d.get_variables(variables,
time = nordic3d.start_time + timedelta(seconds=900),
x=x, y=y, z=z)
self.assertAlmostEqual(data['sea_water_temperature'][0,60, 60],
3.447, 2)
#3.59, 2)
self.assertAlmostEqual(data['sea_water_temperature'][-1,60, 60],
-0.783, 2)
#-0.803, 2)
def test_get_environment(self):
o = PelagicEggDrift(loglevel=0)
reader_nordic = reader_ROMS_native.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc', name='Nordic')
reader_arctic = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc', name='Arctic')
######################################################
# Vertical interpolation is another issue to be fixed:
reader_nordic.zlevels = reader_arctic.z
######################################################
o.add_reader([reader_nordic, reader_arctic])
# One point covered only by Nordic, two points covered
# by both readers, and two points covered by none of the readers
testlon = np.array((14.0, 20.0, 20.1, 4, 5))
testlat = np.array((70.1, 76.0, 76.1, 60, 60))
testz = np.random.uniform(0, 0, len(testlon))
self.assertIsNone(np.testing.assert_array_almost_equal(
[0], reader_nordic.covers_positions(testlon, testlat, testz)[0]))
self.assertIsNone(np.testing.assert_array_almost_equal(
[0, 1, 2],
reader_arctic.covers_positions(testlon, testlat, testz)[0]))
o.seed_elements(testlon, testlat, z=testz, time=reader_nordic.start_time)
o.set_config('environment:fallback:land_binary_mask', 0)
env, env_profiles, missing = \
o.get_environment(list(o.required_variables),
reader_nordic.start_time,
testlon, testlat, testz,
o.required_profiles)
self.assertAlmostEqual(env['sea_water_temperature'][0], 4.251, 2)
self.assertAlmostEqual(env['sea_water_temperature'][1], 0.122, 3)
self.assertAlmostEqual(env['sea_water_temperature'][4], 10.0)
self.assertIsNone(np.testing.assert_array_almost_equal(
missing, [False,False,False,False,False]))
self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,0],
4.251, 2)
self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,4], 10)
#self.assertAlmostEqual(env_profiles['sea_water_temperature'][8,2], 10)
self.assertAlmostEqual(env_profiles['sea_water_temperature'][7,2],
2.159, 3)
# Get separate data
env2, env_profiles2, missing2 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
['sea_water_temperature'])
self.assertTrue(env_profiles2 is not None)
self.assertEqual(set(env_profiles2.keys()),
set(['z', 'sea_water_temperature']))
# Get separate data, without profile
env3, env_profiles3, missing3 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
profiles=None)
self.assertTrue(env_profiles3 is None)
# Get separate data
env4, env_profiles4, missing4 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
['sea_water_temperature'])
self.assertIsNone(np.testing.assert_array_almost_equal(
env['x_sea_water_velocity'],
env2['x_sea_water_velocity']))
self.assertIsNone(np.testing.assert_array_almost_equal(
env_profiles2['sea_water_temperature'].ravel(),
env_profiles4['sea_water_temperature'].ravel()))
def test_constant_reader(self):
o = OpenOil(loglevel=0)
cw = reader_constant.Reader({'x_wind':5, 'y_wind': 6})
cc = reader_constant.Reader({'x_sea_water_velocity':0, 'y_sea_water_velocity': .2})
cs = reader_constant.Reader({'sea_water_temperature': 278})
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
o.add_reader([cw, cc, r])
# TODO: should check why adding constant reader with
# sea_water_temperature gives Deprecated warning
#o.add_reader([cw, cc, cs, r])
o.seed_elements(lon=4, lat=60, time=r.start_time, number=5)
o.run(steps=3)
def test_clip_domain(self):
o = OceanDrift(loglevel=50)
r1 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
r1.clip_boundary_pixels(20)
r2 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertEqual(r2.shape, (151, 81))
self.assertEqual(r1.shape, (111, 41))
self.assertEqual(r1.xmin, 20)
o1 = OceanDrift(loglevel=50)
o1.set_config('environment:fallback:x_sea_water_velocity', None)
o1.add_reader(r1)
o1.seed_elements(lon=15, lat=70.1, time=r1.start_time)
o1.set_config('environment:fallback:land_binary_mask', 0)
o1.run(time_step=3600*3, duration=timedelta(hours=48))
o2 = OceanDrift(loglevel=50)
o2.set_config('environment:fallback:x_sea_water_velocity', None)
o2.add_reader(r2)
o2.seed_elements(lon=15, lat=70.1, time=r1.start_time)
o2.set_config('environment:fallback:land_binary_mask', 0)
o2.run(time_step=3600*3, duration=timedelta(hours=48))
# Compare
lat1 = o1.get_property('lat')[0]
lat2 = o2.get_property('lat')[0]
self.assertEqual(len(lat1), 13)
self.assertEqual(len(lat2), 17)
self.assertIsNone(np.testing.assert_allclose(
lat1[0:12], lat2[0:12]))
# Test reader netCDF_CF_generic
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
self.assertEqual(r.shape, (301, 201))
o3 = OceanDrift(loglevel=50)
o3.set_config('environment:fallback:x_sea_water_velocity', None)
o3.set_config('environment:fallback:land_binary_mask', 0)
o3.add_reader(r)
o3.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
o3.run(steps=24)
r.clip_boundary_pixels(10)
self.assertEqual(r.shape, (281, 181))
o4 = OceanDrift(loglevel=50)
o4.set_config('environment:fallback:x_sea_water_velocity', None)
o4.set_config('environment:fallback:land_binary_mask', 0)
o4.add_reader(r)
o4.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
o4.run(steps=24)
# Compare
lat3 = o3.get_property('lat')[0]
lat4 = o4.get_property('lat')[0]
self.assertEqual(len(lat3), 25)
self.assertEqual(len(lat4), 13)
self.assertIsNone(np.testing.assert_allclose(
lat3[0:12], lat4[0:12]))
def test_reader_current_from_track(self):
"""Check if extrapolated currents are of expected value"""
obslon = [3.1, 3.123456]
obslat = [61.1, 61.132198]
obstime = [datetime(2015, 11, 16, 0), datetime(2015, 11, 16, 6)]
o = OceanDrift(loglevel=20)
reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
reader_current = reader_current_from_track.Reader(obslon, obslat, obstime,
wind_east=0, wind_north=0, windreader=reader_wind, wind_factor=0.018)
self.assertAlmostEqual(reader_current.x_sea_water_velocity.data[0],0.2236, 4)
def test_valid_minmax(self):
"""Check that invalid values are replaced with fallback."""
o = OceanDrift(loglevel=20)
from opendrift.readers.basereader import variables
minval = variables.standard_names['x_wind']['valid_min']
# Setting valid_min to -5, to check that replacement works
variables.standard_names['x_wind']['valid_min'] = -5
reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
o.add_reader(reader_wind)
o.set_config('environment:fallback:x_sea_water_velocity', 0)
o.set_config('environment:fallback:x_wind', 2.0)
o.set_config('environment:fallback:y_sea_water_velocity', 0)
o.set_config('environment:fallback:land_binary_mask', 0)
o.seed_elements(lon=4, lat=60, time=reader_wind.start_time)
o.run(steps=1)
variables.standard_names['x_wind']['valid_min'] = minval # reset
w = o.get_property('x_wind')[0][0]
self.assertAlmostEqual(w, 2.0, 1)
def test_valid_minmax_nanvalues(self):
from opendrift.readers.basereader import variables
# Reducing max current speed to test masking
maxval = variables.standard_names['x_sea_water_velocity']['valid_max']
variables.standard_names['x_sea_water_velocity']['valid_max'] = .1
o = OceanDrift(loglevel=20)
o.set_config('environment:fallback:land_binary_mask', 0)
norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
o.add_reader(norkyst)
o.seed_elements(lon=4.95, lat=62, number=10, time=norkyst.start_time)
o.run(steps=2)
variables.standard_names['x_sea_water_velocity']['valid_max'] = maxval # reset
u = o.get_property('x_sea_water_velocity')[0]
self.assertAlmostEqual(u.max(), -.069, 3) # Some numerical error allowed
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
frank-tancf/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
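# With this fixed seed, the normalized scores typically rank x_1 highest for
# the F-test and x_2 highest for mutual information, while both methods give
# x_3 a score near zero, in line with the docstring (the exact numbers shown
# in the plot titles depend on the random draw).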
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
MattNolanLab/Ramsden_MEC | ABAFunctions/ABA_errors.py | 1 | 4010 | '''
Code for error analysis
Copyright (c) 2014, Helen Ramsden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Image, ImageChops
import numpy as np
from scipy import ndimage
from GenericFunctions import checkOSpath, adjust_spines,st
import matplotlib.pyplot as plt
plt.rc('ytick', labelsize=12)
plt.rc('xtick', labelsize=12)
plt.rc('axes', labelsize=12)
plt.rc('axes', titlesize=20)
def checksegmented(segmaskfilepath,filedict,resultsfilepath):
'''
FUNCTION runs through all segmented masks and checks location of centre of mass and size of mask
input SegmentedMask/
output is a list containing name of file, size of mask,
'''
newfile = open(resultsfilepath + 'maskstatssize.txt','w')
for f in filedict:
# print f
newfile.write(f )
for filepath in [segmaskfilepath]:#, segmask2filepath]:
maskim = Image.open(filepath+ f).convert('L') # need to convert to 8 bit (not rgb)
maskim = ImageChops.invert(maskim)
maskarray = np.array(maskim)
# print maskarray.shape
com = ndimage.measurements.center_of_mass(maskarray)
blackpixels = np.nonzero(maskarray==0)
whitepixels = np.nonzero(maskarray>0)
# print len(blackpixels[0]),len(whitepixels[0])
masksize = len(blackpixels[0])
newfile.write('\t' + '\t'.join([str(com[0]),str(com[1]),str(masksize)]))
newfile.write('\n')
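# Illustrative call (the directory and file names below are placeholders
# assumed for this sketch, not paths from the original study):
# checksegmented('SegmentedMask/', ['section_001.jpg', 'section_002.jpg'],
#                'Results/')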
def plotmi():
'''
Plot the distribution of MI scores from the registration output
'''
milog = np.loadtxt('alllogdata.txt',delimiter = '\t',dtype = float,usecols=[2,3])
diffs = milog[:,0] - milog[:,1]
milognew = np.ma.masked_array(milog, np.isnan(milog))
diffsnew = np.ma.masked_array(diffs, np.isnan(diffs))
# Get rid of nans
milogmaskpre = np.ma.masked_array(milog[:,0],np.isnan(milog[:,0]))
milogmaskpost = np.ma.masked_array(milog[:,1],np.isnan(milog[:,1]))
milogmaskpre = milogmaskpre[milogmaskpre>-1000]
milogmaskpost = milogmaskpost[milogmaskpost>-1000]
fig = plt.figure(figsize = (8,8))
fig.subplots_adjust(bottom=0.2)
fig.subplots_adjust(left=0.2)
ax = fig.add_subplot(1,1,1)
adjust_spines(ax, ['left','bottom'])
cols = ['r','b','k','g','m','y']
# histpre, binspre = np.histogram(milogmaskpre, bins=20)
# histpost, binspre = np.histogram(milogmaskpre, bins=20)
ax.hist(milogmaskpre, bins=20,histtype='step',color='b', range = [-600,0])
ax.hist(milogmaskpost,bins=20,histtype='step',color='g', range = [-600,0]) # normed=True,
[xmin, xmax, ymin, ymax] = ax.axis()
ax.set_yticks([ymin,ymax])
ax.set_yticklabels([int(ymin),int(ymax)], fontsize = 25)
ax.xaxis.set_label_coords(0.5, -0.15)
ax.set_xticks([xmin,xmax])
ax.set_xticklabels([xmin,xmax], fontsize = 25)
ax.set_xlabel('Joint Entropy', fontsize = 25)
ax.set_ylabel('Frequency', fontsize = 25)
ax.yaxis.set_label_coords( -0.15, 0.5)
fig.savefig('MIlogdata.png', transparent = True)
| bsd-3-clause |
ManuSchmi88/landlab | landlab/plot/imshow.py | 3 | 21050 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
"""Prepare a map view of data over all nodes in the grid.
Data is plotted as cells shaded with the value at the node at its center.
Outer edges of perimeter cells are extrapolated. Closed elements are
colored uniformly (default black, overridden with kwd 'color_for_closed');
other open boundary nodes get their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node values, or a field name as a string from which to draw the data.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed nodes (default 'black'). If None, closed
(or masked) nodes will be transparent.
color_for_background : color str or other color declaration, or None
        Color to use for the figure background (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
        If True, and the grid is a Voronoi grid, the cell faces are plotted
        in black over the cell colours, outlining each cell (default False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
if isinstance(values, str):
values_at_node = grid.at_node[values]
else:
values_at_node = values
if values_at_node.size != grid.number_of_nodes:
raise ValueError('number of values does not match number of nodes')
values_at_node = np.ma.masked_where(
grid.status_at_node == CLOSED_BOUNDARY, values_at_node)
try:
shape = grid.shape
except AttributeError:
shape = (-1, )
_imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)
if isinstance(values, str):
plt.title(values)
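# Minimal usage sketch for the function above (not part of the landlab test
# suite). The grid-construction calls assume the landlab 1.x API, i.e.
# RasterModelGrid((nrows, ncols)) and add_zeros(group, name); adjust them if
# the installed version uses different signatures.
def _example_imshow_grid_at_node():
    grid = RasterModelGrid((4, 5))
    z = grid.add_zeros('node', 'topographic__elevation')
    z += np.arange(grid.number_of_nodes)
    imshow_grid_at_node(grid, z, var_name='elevation', var_units='m',
                        grid_units=('m', 'm'))
    plt.show()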
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
imshow_grid_at_node(grid, values, **kwds)
def imshow_grid_at_cell(grid, values, **kwds):
"""Map view of grid data over all grid cells.
Prepares a map view of data over all cells in the grid.
Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.
Construction ::
imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Values at the cells on the grid. Alternatively, can be a field name
(string) from which to draw the data from the grid.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
        Color to use for the figure background (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
        If True, and the grid is a Voronoi grid, the cell faces are plotted
        in black over the cell colours, outlining each cell (default False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
Raises
------
ValueError
If input grid is not uniform rectilinear.
"""
if isinstance(values, str):
try:
values_at_cell = grid.at_cell[values]
except FieldError:
values_at_cell = grid.at_node[values]
else:
values_at_cell = values
if values_at_cell.size == grid.number_of_nodes:
values_at_cell = values_at_cell[grid.node_at_cell]
if values_at_cell.size != grid.number_of_cells:
raise ValueError('number of values must match number of cells or '
'number of nodes')
values_at_cell = np.ma.asarray(values_at_cell)
values_at_cell.mask = True
values_at_cell.mask[grid.core_cells] = False
myimage = _imshow_grid_values(grid,
values_at_cell.reshape(grid.cell_grid_shape),
**kwds)
if isinstance(values, str):
plt.title(values)
return myimage
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
imshow_grid_at_cell(grid, values, **kwds)
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=(None, None),
symmetric_cbar=False, cmap='pink', limits=None,
colorbar_label = None,
allow_colorbar=True, vmin=None, vmax=None,
norm=None, shrink=1., color_for_closed='black',
color_for_background=None, show_elements=False,
output=None):
gridtypes = inspect.getmro(grid.__class__)
cmap = plt.get_cmap(cmap)
if color_for_closed is not None:
cmap.set_bad(color=color_for_closed)
else:
cmap.set_bad(alpha=0.)
if isinstance(grid, RasterModelGrid):
if values.ndim != 2:
raise ValueError('values must have ndim == 2')
y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5
kwds = dict(cmap=cmap)
(kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
if (limits is None) and ((vmin is None) and (vmax is None)):
if symmetric_cbar:
(var_min, var_max) = (values.min(), values.max())
limit = max(abs(var_min), abs(var_max))
(kwds['vmin'], kwds['vmax']) = (- limit, limit)
elif limits is not None:
(kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
else:
if vmin is not None:
kwds['vmin'] = vmin
if vmax is not None:
kwds['vmax'] = vmax
if np.isclose(grid.dx, grid.dy):
if values.size == grid.number_of_nodes:
myimage = plt.imshow(
values.reshape(grid.shape), origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
else: # this is a cell grid, and has been reshaped already...
myimage = plt.imshow(values, origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
myimage = plt.pcolormesh(x, y, values, **kwds)
plt.gca().set_aspect(1.)
plt.autoscale(tight=True)
if allow_colorbar:
cb = plt.colorbar(norm=norm, shrink=shrink)
if colorbar_label:
cb.set_label(colorbar_label)
elif VoronoiDelaunayGrid in gridtypes:
# This is still very much ad-hoc, and needs prettifying.
# We should save the modifications needed to plot color all the way
# to the diagram edge *into* the grid, for faster plotting.
# (see http://stackoverflow.com/questions/20515554/...
# colorize-voronoi-diagram)
# (This technique is not implemented yet)
from scipy.spatial import voronoi_plot_2d
import matplotlib.colors as colors
import matplotlib.cm as cmx
cm = plt.get_cmap(cmap)
if (limits is None) and ((vmin is None) and (vmax is None)):
# only want to work with NOT CLOSED nodes
open_nodes = grid.status_at_node != 4
if symmetric_cbar:
(var_min, var_max) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
limit = max(abs(var_min), abs(var_max))
(vmin, vmax) = (- limit, limit)
else:
(vmin, vmax) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
elif limits is not None:
(vmin, vmax) = (limits[0], limits[1])
else:
open_nodes = grid.status_at_node != 4
if vmin is None:
vmin = values.flat[open_nodes].min()
if vmax is None:
vmax = values.flat[open_nodes].max()
cNorm = colors.Normalize(vmin, vmax)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
colorVal = scalarMap.to_rgba(values)
if show_elements:
myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
show_points=False)
# show_points to be supported in scipy0.18, but harmless for now
mycolors = (i for i in colorVal)
for order in grid.vor.point_region:
region = grid.vor.regions[order]
colortouse = next(mycolors)
if -1 not in region:
polygon = [grid.vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=colortouse)
plt.gca().set_aspect(1.)
# plt.autoscale(tight=True)
# Tempting though it is to move the boundary outboard of the outermost
# nodes (e.g., to the outermost corners), this is a bad idea, as the
# outermost cells tend to have highly elongated shapes which make the
# plot look stupid
plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))
scalarMap.set_array(values)
if allow_colorbar:
cb = plt.colorbar(scalarMap, shrink=shrink)
if grid_units[1] is None and grid_units[0] is None:
grid_units = grid.axis_units
if grid_units[1] == '-' and grid_units[0] == '-':
plt.xlabel('X')
plt.ylabel('Y')
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
if plot_name is not None:
plt.title('%s' % (plot_name))
if var_name is not None or var_units is not None:
if var_name is not None:
assert type(var_name) is str
if var_units is not None:
assert type(var_units) is str
colorbar_label = var_name + ' (' + var_units + ')'
else:
colorbar_label = var_name
else:
assert type(var_units) is str
colorbar_label = '(' + var_units + ')'
assert type(colorbar_label) is str
assert allow_colorbar
cb.set_label(colorbar_label)
if color_for_background is not None:
plt.gca().set_axis_bgcolor(color_for_background)
if output is not None:
if type(output) is str:
plt.savefig(output)
plt.clf()
elif output:
plt.show()
def imshow_grid(grid, values, **kwds):
"""Prepare a map view of data over all nodes or cells in the grid.
Data is plotted as colored cells. If at='node', the surrounding cell is
shaded with the value at the node at its center. If at='cell', the cell
is shaded with its own value. Outer edges of perimeter cells are
extrapolated. Closed elements are colored uniformly (default black,
overridden with kwd 'color_for_closed'); other open boundary nodes get
their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node or cell values, or a field name as a string from which to draw
the data.
at : str, {'node', 'cell'}
Tells plotter where values are defined.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
Units for y, and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
        Color to use for the figure background (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
        If True, and the grid is a Voronoi grid, the cell faces are plotted
        in black over the cell colours, outlining each cell (default False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
show = kwds.pop('show', False)
values_at = kwds.pop('values_at', 'node')
values_at = kwds.pop('at', values_at)
if isinstance(values, str):
values = grid.field_values(values_at, values)
if values_at == 'node':
imshow_grid_at_node(grid, values, **kwds)
elif values_at == 'cell':
imshow_grid_at_cell(grid, values, **kwds)
else:
raise TypeError('value location %s not understood' % values_at)
# retained for backwards compatibility:
if show:
plt.show()
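# Dispatcher usage sketch (illustrative; `grid` and `cell_values` stand in
# for a real ModelGrid and an array sized to its cells):
#
#     imshow_grid(grid, 'topographic__elevation', at='node')
#     imshow_grid(grid, cell_values, at='cell', var_name='value')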
| mit |
NelisVerhoef/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/io/tests/parser/quoting.py | 7 | 5796 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
# quoting must in the range [0, 3]
msg = 'bad "quoting" value'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0394'))
tm.assert_frame_equal(result, expected)
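# The QUOTE_NONNUMERIC behaviour exercised above, as a plain pandas call
# (illustrative sketch, not part of the test suite):
#
#     import csv
#     from pandas import read_csv
#     from pandas.compat import StringIO
#     df = read_csv(StringIO('1,2,"foo"'), names=['a', 'b', 'c'],
#                   quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
#     # unquoted fields are cast to float (df.a[0] == 1.0, df.b[0] == 2.0)
#     # while the quoted field stays a string (df.c[0] == 'foo')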
| apache-2.0 |
mehdidc/scikit-learn | sklearn/neighbors/tests/test_kde.py | 17 | 5626 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
"""Smoke test for various metrics and algorithms"""
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
jbloom/mutpath | src/plot.py | 1 | 10257 | """Module for performing plotting for ``mutpath`` package.
This module uses ``pylab`` and ``matplotlib`` to make plots. These plots will
fail if ``pylab`` and ``matplotlib`` are not available for importation. Before
running any function in this module, you can run the *PylabAvailable*
function to determine if ``pylab`` and ``matplotlib`` are available. Otherwise,
calling any other function will raise an Exception if thise modules are
not available. The ``pdf`` backend is used for ``matplotlib`` / ``pylab``. This means
that plots must be created as PDF files.
Functions are:
`PylabAvailable`
`CumulativeFractionPlot`
`DatesPlot`
`Base10Formatter`
`SplitLabel`
Written by Jesse Bloom.
"""
import os
import sys
import math
# global variable _pylabavailable indicates if pylab/matplotlib present
try:
import matplotlib
matplotlib.use('pdf')
import pylab
_pylabavailable = True
except ImportError:
_pylabavailable = False
def PylabAvailable():
"""Returns True if pylab/matplotlib available, False otherwise.
You should call this function to test for the availability of the
pylab/matplotlib plotting modules before using other functions in
this module.
"""
return _pylabavailable
def DatesPlot(mutdates, plotfile, interval):
"""Plots dates of mutations.
Uses pylab / matplotlib to plot the dates and credible intervals
    for mutations. Will raise an error if *PylabAvailable() == False*.
The plot is a PDF.
* *mutdates* is a list of the mutations, in the form of the tuples
*(median, mininterval, maxinterval, mut, fractoca, weight)*. Mutations
are plotted in the order they are listed. In these tuples:
* *median* : posterior median date
      * *mininterval* : minimum of credible interval
* *maxinterval* : maximum of credible interval
* *mut* : string giving name of mutation
* *fractoca* : probability mutation is on path from common ancestor
to starting sequence
* *weight* : fraction of paths containing mutation.
* *plotfile* is a string giving the name of the PDF file we create.
* *interval* is the range of the credible interval. For example, 0.9
means a 90% credible interval.
"""
ext = os.path.splitext(plotfile)[1].lower()
if ext != '.pdf':
raise ValueError("Extension must be .pdf, but found %s" % ext)
if not PylabAvailable():
raise ValueError("pylab / matplotlib not available.")
if not mutdates:
raise ValueError("no mutation dates to plot")
tocalabels = []
tocamedians = []
tocaerrlow = []
tocaerrhigh = []
tocays = []
fromcalabels = []
fromcamedians = []
fromcaerrlow = []
fromcaerrhigh = []
fromcays = []
y = 0
for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
label = "%s" % (mut)
errlow = median - mininterval
errhigh = maxinterval - median
if fractoca > 0.5:
tocays.append(y)
tocalabels.append(label)
tocamedians.append(median)
tocaerrlow.append(errlow)
tocaerrhigh.append(errhigh)
else:
fromcays.append(y)
fromcalabels.append(label)
fromcamedians.append(median)
fromcaerrlow.append(errlow)
fromcaerrhigh.append(errhigh)
y += 1
(lmargin, rmargin, bmargin, tmargin) = (0.11, 0.05, 0.08, 0.01)
matplotlib.rc('font', size=10)
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('legend', numpoints=1)
matplotlib.rc('legend', fontsize=10)
fig = pylab.figure(figsize=(6, 6))
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - tmargin - bmargin])
tocabar = fromcabar = None
if tocalabels:
tocabar = pylab.errorbar(tocamedians, tocays, xerr=[tocaerrlow, tocaerrhigh], fmt='sr')
if fromcalabels:
fromcabar = pylab.errorbar(fromcamedians, fromcays, xerr=[fromcaerrlow, fromcaerrhigh], fmt='sb')
ny = len(mutdates)
pylab.gca().set_ylim((-1, ny))
pylab.gca().yaxis.set_major_locator(matplotlib.ticker.FixedLocator([y for y in range(ny)]))
pylab.gca().yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(tocalabels + fromcalabels))
pylab.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
pylab.xlabel("Date (posterior median and Bayesian %.2f%s credible interval)" % (interval * 100, '%'))
if tocabar and fromcabar:
pylab.legend([tocabar[0], fromcabar[0]], ['path to common ancestor', 'path from common ancestor'], loc='lower right')
elif tocabar:
pylab.legend([tocabar[0]], ['path to common ancestor'], loc='lower right')
elif fromcabar:
pylab.legend([fromcabar[0]], ['path from common ancestor'], loc='lower right')
pylab.savefig(plotfile)
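def _ExampleDatesPlot():
    """Illustrative sketch with made-up data (not from a real analysis).
    The tuples follow the *(median, mininterval, maxinterval, mut, fractoca,
    weight)* format expected by *DatesPlot*.
    """
    mutdates = [(1998.5, 1997.2, 1999.9, 'A137T', 0.9, 0.95),
                (2003.1, 2001.8, 2004.6, 'G45S', 0.1, 0.80)]
    DatesPlot(mutdates, 'mutation_dates.pdf', 0.9)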
def CumulativeFractionPlot(datalist, plotfile, title, xlabel):
"""Creates a cumulative fraction plot.
Takes a list of numeric data. Plots a cumulative fraction
plot giving the fraction of the data points that are <=
the indicated value.
*datalist* is a list of numbers giving the data for which we
are computing the cumulative fraction plot. Raises an
exception if this is an empty list.
*plotfile* is the name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be '.pdf'.
*title* is a string placed above the plot as a title. Uses LaTex
formatting.
*xlabel* is the label given to the X-axis. Uses LaTex formatting.
This function uses pylab / matplotlib. It will raise an Exception if
these modules cannot be imported (if PylabAvailable() is False).
"""
if len(datalist) < 1:
raise ValueError("datalist is empty")
if not _pylabavailable:
raise ImportError("Could not find pylab or matplotlib")
if os.path.splitext(plotfile)[1] != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
datalist.sort() # sort from smallest to largest
(xmin, xmax) = (datalist[0], datalist[-1])
n = len(datalist)
cumfracs = []
cf = 0.0
for x in datalist:
cf += 1. / n
cumfracs.append(cf)
assert len(datalist) == len(cumfracs)
assert abs(1.0 - cf) < 1e-7
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=12)
fig = pylab.figure(figsize=(6, 4))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.01, 0.15, 0.1)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
bmargin - tmargin])
pylab.plot(datalist, cumfracs, 'r-')
pylab.gca().set_ylim([0, 1])
pylab.gca().set_xlim([xmin, xmax])
pylab.ylabel('cumulative fraction')
pylab.xlabel(xlabel)
pylab.title(title)
if plotfile:
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
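def _ExampleCumulativeFractionPlot():
    """Illustrative sketch with synthetic data (not from a real analysis)."""
    import random
    data = [random.gauss(0.0, 1.0) for i in range(200)]
    CumulativeFractionPlot(data, 'cumfrac.pdf', 'synthetic normal draws',
                           'value')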
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
"""Converts a number into Latex formatting with scientific notation.
Takes a number and converts it to a string that can be shown
in LaTex using math mode. It is converted to scientific notation
if the criteria specified by exp_cutoff.
*number* the number to be formatted, should be a float or integer.
Currently only works for numbers >= 0
*exp_cutoff* convert to scientific notation if abs(math.log10(number)) >= this.
*exp_decimal_digits* show this many digits after the decimal if number
is converted to scientific notation.
*decimal_digits* show this many digits after the decimal if number
is NOT converted to scientific notation.
The returned value is the LaTex' string. If the number is zero, the
returned string is simply '0'.
>>> Base10Formatter(103, 3, 1, 1)
'103.0'
>>> Base10Formatter(103.0, 2, 1, 1)
'1.0 \\\\times 10^{2}'
>>> Base10Formatter(103.0, 2, 2, 1)
'1.03 \\\\times 10^{2}'
>>> Base10Formatter(2892.3, 3, 1, 1)
'2.9 \\\\times 10^{3}'
>>> Base10Formatter(0.0, 3, 1, 1)
'0'
>>> Base10Formatter(0.012, 2, 1, 1)
'1.2 \\\\times 10^{-2}'
>>> Base10Formatter(-0.1, 3, 1, 1)
Traceback (most recent call last):
...
ValueError: number must be >= 0
"""
if number < 0:
raise ValueError('number must be >= 0')
if number == 0:
return '0'
exponent = int(math.log10(number))
if math.log10(number) < exponent and number < 1:
exponent -= 1
if abs(exponent) >= exp_cutoff:
x = number / (10.**exponent)
formatstr = '%.' + '%d' % exp_decimal_digits + 'f \\times 10^{%d}'
return formatstr % (x, exponent)
else:
formatstr = '%.' + '%d' % decimal_digits + 'f'
return formatstr % number
def SplitLabel(label, splitlen, splitchar):
"""Splits a string with a return if it exceeds a certain length.
*label* a string giving the label we might split.
*splitlen* the maximum length of a label before we attempt to
split it.
*splitchar* the character added when splitting a label.
If len(*label*) > *splitlen*, we attempt to split the label in the
middle by adding *splitchar*. The label is split as close to the
middle as possible while splitting at a space.
No splitting as label length less than *splitlen*
>>> SplitLabel('WT virus 1', 10, '\\n')
'WT virus 1'
Splitting of this label
>>> SplitLabel('WT plasmid 1', 10, '\\n')
'WT\\nplasmid 1'
Splitting of this label
>>> SplitLabel('mutated WT plasmid 1', 10, '\\n')
'mutated WT\\nplasmid 1'
"""
if len(label) <= splitlen:
return label
else:
j = 0
imid = len(label) // 2
index = None
while 0 <= imid - j <= imid + j < len(label):
if label[imid - j].isspace():
return "%s%s%s" % (label[ : imid - j], splitchar, label[imid - j + 1 : ])
elif label[imid + j].isspace():
return "%s%s%s" % (label[ : imid + j], splitchar, label[imid + j + 1 : ])
j += 1
else:
return label # no white space to split
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
xavierwu/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
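# Illustrative check of the grid construction above (not part of the
# scikit-learn test suite): with no centering or scaling, the largest alpha
# equals max_j |x_j^T y| / (n_samples * l1_ratio), and the remaining alphas
# are log-spaced down to eps * alpha_max.
def _example_alpha_grid():
    X = np.array([[1., -1.], [0., 2.], [-1., -1.]])
    y = np.array([1., 2., -3.])
    alphas = _alpha_grid(X, y, l1_ratio=1.0, fit_intercept=False,
                         normalize=False, n_alphas=5)
    assert np.isclose(alphas[0], np.abs(np.dot(X.T, y)).max() / (3 * 1.0))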
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize of fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
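# Illustrative usage sketch (not from the scikit-learn docs): trace the
# elastic-net path on a small random problem and inspect the shape of the
# returned coefficient array.
def _example_enet_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.5 * X[:, 1] + 0.01 * rng.randn(50)
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.8, n_alphas=20)
    # one column of coefficients per alpha; alphas are returned from
    # largest to smallest
    assert coefs.shape == (10, 20)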
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
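# Illustrative usage sketch for ElasticNet (not part of the original module;
# the toy data below is hypothetical and the coefficients depend on
# alpha/l1_ratio):
#
#     >>> from sklearn.linear_model import ElasticNet
#     >>> enet = ElasticNet(alpha=0.1, l1_ratio=0.7)
#     >>> enet.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])  # doctest: +SKIP
#     >>> enet.predict([[1.5, 1.5]])                     # doctest: +SKIP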
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
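# Illustrative warm-start sketch for Lasso (not part of the original module;
# X, y and the alpha grid are hypothetical). With ``warm_start=True`` each fit
# reuses the previous coefficients as initialization along the path:
#
#     >>> lasso = Lasso(warm_start=True)
#     >>> for alpha in [1.0, 0.1, 0.01]:               # doctest: +SKIP
#     ...     lasso.set_params(alpha=alpha).fit(X, y)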
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
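# Illustrative usage sketch for LassoCV (not part of the original module; the
# toy data is hypothetical and the selected alpha_ depends on it):
#
#     >>> from sklearn.linear_model import LassoCV
#     >>> clf = LassoCV(cv=3)
#     >>> clf.fit([[0, 0], [1, 1], [2, 2], [3, 3]], [0, 1, 2, 3])  # doctest: +SKIP
#     >>> clf.alpha_   # amount of penalization chosen by cross-validation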
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets, n_features)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
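# Illustrative usage sketch for ElasticNetCV (not part of the original module;
# X_train, y_train and the l1_ratio grid are hypothetical):
#
#     >>> from sklearn.linear_model import ElasticNetCV
#     >>> clf = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3)
#     >>> clf.fit(X_train, y_train)      # doctest: +SKIP
#     >>> clf.alpha_, clf.l1_ratio_      # best pair found by cross-validation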
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
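# Illustrative usage sketch for MultiTaskLassoCV (not part of the original
# module; y must be 2-D with one column per task, the data is hypothetical):
#
#     >>> from sklearn.linear_model import MultiTaskLassoCV
#     >>> clf = MultiTaskLassoCV(cv=3)
#     >>> clf.fit([[0, 0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])  # doctest: +SKIP
#     >>> clf.coef_.shape   # (n_tasks, n_features)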
| bsd-3-clause |
FernanOrtega/DAT210x | Module3/notes/2Dscatter_example.py | 1 | 1245 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 21:14:57 2017
@author: fernando
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
df = pd.read_csv('concrete.csv')
print df.describe()
# Plot 1
df.plot.scatter(x='cement', y='strength')
plt.suptitle('Cement vs str')
plt.xlabel('Cement')
plt.ylabel('Str')
# Plot 2
df.plot.scatter(x='slag', y='strength')
plt.suptitle('slag vs str')
plt.xlabel('slag')
plt.ylabel('Str')
# Plot 3
df.plot.scatter(x='ash', y='strength')
plt.suptitle('ash vs str')
plt.xlabel('ash')
plt.ylabel('Str')
# Plot 4
df.plot.scatter(x='water', y='strength')
plt.suptitle('water vs str')
plt.xlabel('water')
plt.ylabel('Str')
# Plot 5
df.plot.scatter(x='superplastic', y='strength')
plt.suptitle('superplastic vs str')
plt.xlabel('superplastic')
plt.ylabel('Str')
# Plot 6
df.plot.scatter(x='coarseagg', y='strength')
plt.suptitle('coarseagg vs str')
plt.xlabel('coarseagg')
plt.ylabel('Str')
# Plot 7
df.plot.scatter(x='fineagg', y='strength')
plt.suptitle('fineagg vs str')
plt.xlabel('fineagg')
plt.ylabel('Str')
# Plot 8
df.plot.scatter(x='age', y='strength')
plt.suptitle('age vs str')
plt.xlabel('age')
plt.ylabel('Str')
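# The eight scatter blocks above could also be generated in a loop
# (illustrative alternative, assuming the same column names in concrete.csv):
# for col in ['cement', 'slag', 'ash', 'water', 'superplastic',
#             'coarseagg', 'fineagg', 'age']:
#     df.plot.scatter(x=col, y='strength')
#     plt.suptitle(col + ' vs str')
#     plt.xlabel(col)
#     plt.ylabel('Str')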
plt.show() | mit |
Nelca/buildMLSystem | ch04/blei_lda.py | 3 | 1602 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from gensim import corpora, models, similarities
from mpltools import style
import matplotlib.pyplot as plt
import numpy as np
from os import path
style.use('ggplot')
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=corpus.id2word, alpha=None)
for ti in xrange(84):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
print('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
print()
print()
print()
thetas = [model[c] for c in corpus]
plt.hist([len(t) for t in thetas], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.savefig('../1400OS_04_01+.png')
model1 = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=corpus.id2word, alpha=1.)
thetas1 = [model1[c] for c in corpus]
#model8 = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word, alpha=1.e-8)
#thetas8 = [model8[c] for c in corpus]
plt.clf()
plt.hist([[len(t) for t in thetas], [len(t) for t in thetas1]], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.text(9, 223, r'default alpha')
plt.text(26, 156, 'alpha=1.0')
plt.savefig('../1400OS_04_02+.png')
| mit |
elijah513/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
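# Illustrative sketch (not part of the original module): the generative-model
# helpers above are normally reached through a concrete subclass such as PCA,
# e.g. (random data, results vary):
#
#     >>> import numpy as np                               # doctest: +SKIP
#     >>> from sklearn.decomposition import PCA            # doctest: +SKIP
#     >>> X = np.random.RandomState(0).randn(100, 5)       # doctest: +SKIP
#     >>> pca = PCA(n_components=2).fit(X)                 # doctest: +SKIP
#     >>> cov = pca.get_covariance()    # components_.T * S**2 * components_ + noise
#     >>> prec = pca.get_precision()    # inverse of cov via the inversion lemma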
| bsd-3-clause |
cbertinato/pandas | pandas/tests/indexes/test_setops.py | 1 | 2362 | '''
The tests in this package are to ensure the proper resultant dtypes of
set operations.
'''
import itertools as it
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import Int64Index, RangeIndex
from pandas.tests.indexes.conftest import indices_list
import pandas.util.testing as tm
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex)
}
@pytest.fixture(params=list(it.combinations(indices_list, 2)),
ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__)
def index_pair(request):
"""
Create all combinations of 2 index types.
"""
return request.param
def test_union_same_types(indices):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = indices.sort_values()
idx2 = indices.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index_pair):
# GH 23525
idx1, idx2 = index_pair
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
pytest.xfail('This test only considers non compatible indexes.')
if any(isinstance(idx, pd.MultiIndex) for idx in index_pair):
        pytest.xfail('This test doesn\'t consider multi-indexes.')
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.xfail('This test only considers non matching dtypes.')
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
# This is true before this PR as well.
# Union with a non-unique, non-monotonic index raises error
# This applies to the boolean index
idx1 = idx1.sort_values()
idx2 = idx2.sort_values()
assert idx1.union(idx2).dtype == np.dtype('O')
assert idx2.union(idx1).dtype == np.dtype('O')
@pytest.mark.parametrize('idx_fact1,idx_fact2',
COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
# GH 23525
idx1 = idx_fact1(10)
idx2 = idx_fact2(20)
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
assert res1.dtype in (idx1.dtype, idx2.dtype)
assert res2.dtype in (idx1.dtype, idx2.dtype)
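# Example invocation (illustrative; exact path and options depend on the local
# checkout): pytest pandas/tests/indexes/test_setops.py -k "union" -v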
| bsd-3-clause |
Panos-Bletsos/spark-cost-model-optimizer | python/pyspark/sql/session.py | 11 | 24874 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedContext
if session is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
if SparkSession._instantiatedContext is None:
SparkSession._instantiatedContext = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedContext = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
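# A minimal usage sketch of the builder pattern and createDataFrame API shown
# above. It is illustrative only: the master URL, app name, config key and
# sample rows are assumptions, not part of the pyspark sources or tests.
def _example_session_usage():
    from pyspark.sql import Row, SparkSession
    spark = (SparkSession.builder
             .master("local[2]")
             .appName("session-usage-sketch")
             .config("spark.sql.shuffle.partitions", "4")
             .getOrCreate())
    # Schema is inferred from the Row objects, as documented in createDataFrame.
    df = spark.createDataFrame([Row(name="Alice", age=1), Row(name="Bob", age=2)])
    df.createOrReplaceTempView("people")
    names = spark.sql("SELECT name FROM people WHERE age > 1").collect()
    spark.stop()
    return names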
| apache-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/widgets/menu.py | 3 | 4882 | import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.mathtext as mathtext
import matplotlib.pyplot as plt
import matplotlib.artist as artist
import matplotlib.image as image
class ItemProperties:
def __init__(self, fontsize=14, labelcolor='black', bgcolor='yellow',
alpha=1.0):
self.fontsize = fontsize
self.labelcolor = labelcolor
self.bgcolor = bgcolor
self.alpha = alpha
self.labelcolor_rgb = colors.colorConverter.to_rgb(labelcolor)
self.bgcolor_rgb = colors.colorConverter.to_rgb(bgcolor)
class MenuItem(artist.Artist):
parser = mathtext.MathTextParser("Bitmap")
padx = 5
pady = 5
def __init__(self, fig, labelstr, props=None, hoverprops=None,
on_select=None):
artist.Artist.__init__(self)
self.set_figure(fig)
self.labelstr = labelstr
if props is None:
props = ItemProperties()
if hoverprops is None:
hoverprops = ItemProperties()
self.props = props
self.hoverprops = hoverprops
self.on_select = on_select
x, self.depth = self.parser.to_mask(
labelstr, fontsize=props.fontsize, dpi=fig.dpi)
if props.fontsize!=hoverprops.fontsize:
raise NotImplementedError(
'support for different font sizes not implemented')
self.labelwidth = x.shape[1]
self.labelheight = x.shape[0]
self.labelArray = np.zeros((x.shape[0], x.shape[1], 4))
self.labelArray[:, :, -1] = x/255.
self.label = image.FigureImage(fig, origin='upper')
self.label.set_array(self.labelArray)
# we'll update these later
self.rect = patches.Rectangle((0,0), 1,1)
self.set_hover_props(False)
fig.canvas.mpl_connect('button_release_event', self.check_select)
def check_select(self, event):
over, junk = self.rect.contains(event)
if not over:
return
if self.on_select is not None:
self.on_select(self)
def set_extent(self, x, y, w, h):
        print('%s %s %s %s' % (x, y, w, h))  # debug output of the computed extent
self.rect.set_x(x)
self.rect.set_y(y)
self.rect.set_width(w)
self.rect.set_height(h)
self.label.ox = x+self.padx
self.label.oy = y-self.depth+self.pady/2.
self.rect._update_patch_transform()
self.hover = False
def draw(self, renderer):
self.rect.draw(renderer)
self.label.draw(renderer)
def set_hover_props(self, b):
if b:
props = self.hoverprops
else:
props = self.props
r, g, b = props.labelcolor_rgb
self.labelArray[:, :, 0] = r
self.labelArray[:, :, 1] = g
self.labelArray[:, :, 2] = b
self.label.set_array(self.labelArray)
self.rect.set(facecolor=props.bgcolor, alpha=props.alpha)
def set_hover(self, event):
'check the hover status of event and return true if status is changed'
b,junk = self.rect.contains(event)
changed = (b != self.hover)
if changed:
self.set_hover_props(b)
self.hover = b
return changed
class Menu:
def __init__(self, fig, menuitems):
self.figure = fig
fig.suppressComposite = True
self.menuitems = menuitems
self.numitems = len(menuitems)
maxw = max([item.labelwidth for item in menuitems])
maxh = max([item.labelheight for item in menuitems])
totalh = self.numitems*maxh + (self.numitems+1)*2*MenuItem.pady
x0 = 100
y0 = 400
width = maxw + 2*MenuItem.padx
height = maxh+MenuItem.pady
for item in menuitems:
left = x0
bottom = y0-maxh-MenuItem.pady
item.set_extent(left, bottom, width, height)
fig.artists.append(item)
y0 -= maxh + MenuItem.pady
fig.canvas.mpl_connect('motion_notify_event', self.on_move)
def on_move(self, event):
draw = False
for item in self.menuitems:
draw = item.set_hover(event)
if draw:
self.figure.canvas.draw()
break
fig = plt.figure()
fig.subplots_adjust(left=0.3)
props = ItemProperties(labelcolor='black', bgcolor='yellow',
fontsize=15, alpha=0.2)
hoverprops = ItemProperties(labelcolor='white', bgcolor='blue',
fontsize=15, alpha=0.2)
menuitems = []
for label in ('open', 'close', 'save', 'save as', 'quit'):
def on_select(item):
        print('you selected %s' % item.labelstr)
item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
on_select=on_select)
menuitems.append(item)
menu = Menu(fig, menuitems)
plt.show()
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_nanops.py | 7 | 44169 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import warnings
import numpy as np
from pandas import Series, isnull
from pandas.types.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
def setUp(self):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
self.arr_float1 = np.random.randn(*self.arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, self.arr_shape)
self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype('S')
self.arr_utf = np.abs(self.arr_float).astype('U')
self.arr_date = np.random.randint(0, 20000,
self.arr_shape).astype('M8[ns]')
self.arr_tdelta = np.random.randint(0, 20000,
self.arr_shape).astype('m8[ns]')
self.arr_nan = np.tile(np.nan, self.arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
self.arr_inf])
self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
self.arr_nan])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
self.arr_inf])
self.arr_obj = np.vstack([self.arr_float.astype(
'O'), self.arr_int.astype('O'), self.arr_bool.astype(
'O'), self.arr_complex.astype('O'), self.arr_str.astype(
'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
self.arr_tdelta.astype('O')])
with np.errstate(invalid='ignore'):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex,
self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex,
self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_complex_2d = self.arr_complex[:, :, 0]
self.arr_int_2d = self.arr_int[:, :, 0]
self.arr_bool_2d = self.arr_bool[:, :, 0]
self.arr_str_2d = self.arr_str[:, :, 0]
self.arr_utf_2d = self.arr_utf[:, :, 0]
self.arr_date_2d = self.arr_date[:, :, 0]
self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
self.arr_inf_2d = self.arr_inf[:, :, 0]
self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_complex_1d = self.arr_complex[:, 0, 0]
self.arr_int_1d = self.arr_int[:, 0, 0]
self.arr_bool_1d = self.arr_bool[:, 0, 0]
self.arr_str_1d = self.arr_str[:, 0, 0]
self.arr_utf_1d = self.arr_utf[:, 0, 0]
self.arr_date_1d = self.arr_date[:, 0, 0]
self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
self.arr_inf_1d = self.arr_inf.ravel()
self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def tearDown(self):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real,
check_dtype=check_dtype)
tm.assert_almost_equal(targ.imag, res.imag,
check_dtype=check_dtype)
def check_fun_data(self, testfunc, targfunc, testarval, targarval,
targarnanval, check_dtype=True, **kwargs):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
try:
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
targarnanval2, check_dtype=check_dtype, **kwargs)
def check_fun(self, testfunc, targfunc, testar, targar=None,
targarnan=None, **kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc, testarval, targarval,
targarnanval, **kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
def check_funs(self, testfunc, targfunc, allow_complex=True,
allow_all_nan=True, allow_str=True, allow_date=True,
allow_tdelta=True, allow_obj=True, **kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
**kwargs)
self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
self.arr_bool.astype('O')]
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_complex_nan',
'arr_complex', **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
objs += [self.arr_complex.astype('O')]
if allow_str:
self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
objs += [self.arr_tdelta.astype('O')]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == 'convert':
targfunc = partial(self._badobj_wrap, func=targfunc,
allow_complex=allow_complex)
self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def check_funs_ddof(self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=False,
allow_tdelta=False,
allow_obj=True, ):
for ddof in range(3):
try:
self.check_funs(testfunc, targfunc, allow_complex,
allow_all_nan, allow_str, allow_date,
allow_tdelta, allow_obj, ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof, )
raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum, allow_str=False,
allow_date=False, allow_tdelta=True, check_dtype=False)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
allow_obj=False, allow_str=False, allow_date=False,
allow_tdelta=True)
def test_nanmean_overflow(self):
# GH 10155
        # In the previous implementation, mean could overflow for int dtypes;
        # it is now consistent with numpy.
        # numpy < 1.9.0 does not compute this correctly.
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.9.0':
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
self.assertEqual(result, a)
self.assertEqual(result, np_result)
self.assertTrue(result.dtype == np.float64)
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, 'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
self.assertTrue(
result.dtype == np.float64,
"return dtype expected from %s is np.float64, "
"got %s instead" % (method, result.dtype))
else:
self.assertTrue(
result.dtype == dtype,
"return dtype expected from %s is %s, "
"got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import sem
self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isnull(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, allow_str=False,
allow_obj=False, allow_date=True, allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func, allow_date=True,
allow_tdelta=True, allow_str=False,
allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func, allow_str=False,
allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
def test_nanskew(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_funs(nanops.nankurt, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_kendall(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
def test_nancorr_spearman(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ('ndim: %s' % arr_float.ndim, )
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, 'ndim', True):
try:
res0 = func(value, *args, **kwargs)
if correct:
self.assertTrue(res0)
else:
self.assertFalse(res0)
except BaseException as exc:
exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
raise
if not hasattr(value, 'ndim'):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', False),
('arr_nan_nanj', False), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', False),
('arr_float_nan', False), ('arr_nan_nan', False),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__isfinite(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', True),
('arr_nan_nanj', True), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', True),
('arr_float_nan', True), ('arr_nan_nan', True),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__bn_ok_dtype(self):
self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
class TestEnsureNumeric(tm.TestCase):
def test_numeric_values(self):
# Test integer
self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
# Test float
self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float')
# Test complex
self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j,
'Failed for complex')
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
'Failed for numeric ndarray')
# Test object ndarray
o_values = values.astype(object)
self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
'Failed for object ndarray')
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
'Failed for convertible string ndarray')
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
self.assertRaises(ValueError, lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
'Failed for convertible integer string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
'Failed for convertible float string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
'Failed for convertible complex string')
def test_non_convertable_values(self):
self.assertRaises(TypeError, lambda: nanops._ensure_numeric('foo'))
self.assertRaises(TypeError, lambda: nanops._ensure_numeric({}))
self.assertRaises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(tm.TestCase):
# xref GH10242
def setUp(self):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5,
check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan,
check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(actual_variance, np.array(
[self.variance, 1.0 / 12]), check_less_precise=2)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
tm.assert_almost_equal(variance_1, var,
check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
check_less_precise=2)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
], [0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292]])
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array([[[0.13762259, 0.05619224, 0.11568816
], [0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449]],
[[0.09519783, 0.16435395, 0.05082054
], [0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163]]])
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
self.assertTrue(np.isnan(var[3]))
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
self.assertTrue(np.isnan(std[3]))
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
self.assertEqual(result, 0.0)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues(tm.TestCase):
# xref GH 11974
def setUp(self):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
self.assertEqual(skew, 0.0)
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
self.assertLess(nanops.nanskew(left_tailed), 0)
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
self.assertGreater(nanops.nanskew(right_tailed), 0)
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
self.assertAlmostEqual(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
self.assertTrue(np.isnan(skew))
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues(tm.TestCase):
# xref GH 11974
def setUp(self):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
self.assertEqual(kurt, 0.0)
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
self.assertLess(nanops.nankurt(left_tailed), 0)
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
self.assertGreater(nanops.nankurt(right_tailed), 0)
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
self.assertAlmostEqual(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
self.assertTrue(np.isnan(kurt))
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), m)
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([n, n]), n)
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assertRaisesRegexp(OverflowError, msg):
nanops._checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assertRaisesRegexp(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
nanops._checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s'
], exit=False)
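# A small, self-contained sketch of the NaN-aware reductions exercised by the
# tests above (skipna handling and nanvar's ddof argument). The sample array is
# an illustrative assumption, not data used by any test case.
def _example_nanops_usage():
    import numpy as np
    import pandas.core.nanops as nanops
    arr = np.array([1.0, 2.0, np.nan, 4.0])
    total = nanops.nansum(arr, skipna=True)               # 7.0 -- the NaN is ignored
    mean = nanops.nanmean(arr, skipna=True)               # 7.0 / 3
    var_sample = nanops.nanvar(arr, skipna=True, ddof=1)  # unbiased variance over 3 valid obs
    strict = nanops.nansum(arr, skipna=False)             # NaN propagates when skipna=False
    return total, mean, var_sample, strict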
| gpl-3.0 |
musically-ut/statsmodels | statsmodels/tsa/vector_ar/dynamic.py | 27 | 9932 | # pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
        Deterministic term specification passed to ``util.get_trendorder``:
        'c' adds a constant, 'nc' adds none, 'ct' a constant plus linear trend,
        'ctt' a constant plus linear and quadratic trends
Returns
-------
**Attributes**:
coefs : WidePanel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as WidePanel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.WidePanel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
return pd.DataFrame(data)
def forecast(self, steps=1):
"""
Produce dynamic forecast
Parameters
----------
        steps : int, default 1
            Number of steps ahead to forecast.
Returns
-------
forecasts : pandas.DataFrame
"""
output = np.empty((self.T - steps, self.neqs))
y_values = self.y.values
y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))
result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))
coefs = self._coefs_raw
intercepts = self._intercepts_raw
# can only produce this many forecasts
forc_index = self.result_index[steps:]
for i, date in enumerate(forc_index):
# TODO: check that this does the right thing in weird cases...
idx = y_index_map[date] - steps
result_idx = result_index_map[date] - steps
y_slice = y_values[:idx]
forcs = _model.forecast(y_slice, coefs[result_idx],
intercepts[result_idx], steps)
output[i] = forcs[-1]
return pd.DataFrame(output, index=forc_index, columns=self.names)
def plot_forecast(self, steps=1, figsize=(10, 10)):
"""
Plot h-step ahead forecasts against actual realizations of time
series. Note that forecasts are lined up with their respective
realizations.
Parameters
----------
        steps : int, default 1
            Number of steps ahead; each forecast is lined up with the
            realization it targets.
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,
sharex=True)
forc = self.forecast(steps=steps)
dates = forc.index
y_overlay = self.y.reindex(dates)
for i, col in enumerate(forc.columns):
ax = axes[i]
y_ts = y_overlay[col]
forc_ts = forc[col]
y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)
forc_handle = ax.plot(dates, forc_ts.values, 'k-')
fig.legend((y_handle, forc_handle), ('Y', 'Forecast'))
fig.autofmt_xdate()
fig.suptitle('Dynamic %d-step forecast' % steps)
# pretty things up a bit
plotting.adjust_subplots(bottom=0.15, left=0.10)
plt.draw_if_interactive()
@property
def _is_rolling(self):
return self._window_type == ROLLING
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
data = dict((eq, r.r2) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
class DynamicPanelVAR(DynamicVAR):
"""
Dynamic (time-varying) panel vector autoregression using panel ordinary
least squares
Parameters
----------
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.neqs = len(data.columns)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _filter_data(lhs, rhs):
"""
Data filtering routine for dynamic VAR
lhs : DataFrame
original data
rhs : DataFrame
lagged variables
Returns
    -------
    filtered_lhs : DataFrame
        lhs restricted to rows where both lhs and rhs are fully observed
    filtered_rhs : DataFrame
        rhs restricted to the same rows
    pre_filtered_rhs : DataFrame
        rhs restricted to rows where rhs itself is fully observed
    index : Index
        union of the lhs and rhs indexes
    valid : ndarray of bool
        mask of rows with complete observations in both lhs and rhs
    """
def _has_all_columns(df):
return np.isfinite(df.values).sum(1) == len(df.columns)
rhs_valid = _has_all_columns(rhs)
if not rhs_valid.all():
pre_filtered_rhs = rhs[rhs_valid]
else:
pre_filtered_rhs = rhs
index = lhs.index.union(rhs.index)
if not index.equals(rhs.index) or not index.equals(lhs.index):
rhs = rhs.reindex(index)
lhs = lhs.reindex(index)
rhs_valid = _has_all_columns(rhs)
lhs_valid = _has_all_columns(lhs)
valid = rhs_valid & lhs_valid
if not valid.all():
filt_index = rhs.index[valid]
filtered_rhs = rhs.reindex(filt_index)
filtered_lhs = lhs.reindex(filt_index)
else:
filtered_rhs, filtered_lhs = rhs, lhs
return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
def _make_lag_matrix(x, lags):
data = {}
columns = []
for i in range(1, 1 + lags):
lagstr = 'L%d.'% i
lag = x.shift(i).rename(columns=lambda c: lagstr + c)
data.update(lag._series)
columns.extend(lag.columns)
return pd.DataFrame(data, columns=columns)
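# Illustrative sketch (not part of the original module): shows the column layout
# _make_lag_matrix produces for a toy frame. The data below is made up, and the
# helper relies on the legacy DataFrame._series attribute, so it assumes the same
# old pandas API as the rest of this module.
def _example_make_lag_matrix():
    toy = pd.DataFrame({'a': np.arange(5.), 'b': 10. * np.arange(5.)})
    lagged = _make_lag_matrix(toy, 2)
    # columns come out as ['L1.a', 'L1.b', 'L2.a', 'L2.b'], with NaNs at the
    # top of each column introduced by the shifts
    return lagged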
class Equation(object):
"""
Stub, estimate one equation
"""
def __init__(self, y, x):
pass
if __name__ == '__main__':
import pandas.util.testing as ptest
ptest.N = 500
data = ptest.makeTimeDataFrame().cumsum(0)
var = DynamicVAR(data, lag_order=2, window_type='expanding')
var2 = DynamicVAR(data, lag_order=2, window=10,
window_type='rolling')
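    # Illustrative additions (not in the original script): inspect the
    # expanding-window fit above and produce a 1-step-ahead dynamic forecast.
    # This relies on the same legacy pandas.ols / WidePanel API used by the
    # rest of the module.
    print(var.coefs)                     # time-varying coefficients
    print(var.forecast(steps=1).tail())  # forecasts lined up with realizations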
| bsd-3-clause |
jblackburne/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
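###############################################################################
# Hedged addition (not part of the original example): the same likelihood-based
# comparison can be read off the estimators' score() methods, which return the
# average per-sample log-likelihood of held-out data. The split is illustrative.
from sklearn.model_selection import train_test_split
X_train, X_test = train_test_split(X_hetero, random_state=0)
pca_ll = PCA(svd_solver='full', n_components=rank).fit(X_train).score(X_test)
fa_ll = FactorAnalysis(n_components=rank).fit(X_train).score(X_test)
print("held-out log-likelihood: PCA = %.3f, FA = %.3f" % (pca_ll, fa_ll))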
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
    # we create an instance of NearestCentroid and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
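# Hedged addition (not part of the original example): shrink_threshold can also
# be chosen by cross-validation instead of being fixed by hand. The grid below
# is illustrative; GridSearchCV lives in sklearn.model_selection on recent
# versions and in sklearn.grid_search on older ones.
try:
    from sklearn.model_selection import GridSearchCV
except ImportError:
    from sklearn.grid_search import GridSearchCV
param_grid = {'shrink_threshold': [None, 0.05, 0.1, 0.2, 0.5]}
search = GridSearchCV(NearestCentroid(), param_grid, cv=5)
search.fit(X, y)
print("best shrink_threshold:", search.best_params_['shrink_threshold'])
print("cross-validated accuracy: %.3f" % search.best_score_)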
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft; with all the lfilter calls it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
            extended MA polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
        fftma : ndarray
            fft of zero-padded ma polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
if n == self.fftarma:
fftarma = self.fftarma
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
this is also similarly use in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
            w = np.linspace(0, np.pi, 100)  # default grid; the original referenced nfreq, which is undefined here
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
        rvs = self.generate_sample(nsample=nobs, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
        ax.set_title('Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
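def _example_armafft_usage():
    '''Illustrative sketch (not part of the original module): typical use of
    ArmaFft -- build a process, evaluate its spectral density, and whiten a
    simulated sample by applying the ARMA filter in the frequency domain.
    The AR/MA coefficients below are arbitrary.
    '''
    process = ArmaFft([1, -0.5], [1., 0.4], 256)
    spd, w = process.spddirect(2**10)      # two-sided spd on fft frequencies
    sample = process.generate_sample(256)
    whitened = process.filter(sample)      # should be close to white noise
    return w, spd, whitened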
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print(marep[:10])
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))
arrep = signal.lfilter([1],marep, uni)
print(arrep[:20]) # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print(x[:5])
print(rvs[:5])
print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print(y2[:10])
print(y[:10])
print(maxabs(y, y2)) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print(spd1.shape)
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print(spd2.shape)
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print(spd3.shape)
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print(spd3b.shape)
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print(spdr.shape)
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print(spdar1_.shape)
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print(spdper.shape)
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
sdm, wm = mlb.psd(x)
print('sdm.shape', sdm.shape)
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print('spdnt.shape', spdnt.shape)
_ = plt.plot(spdnt.ravel())
print(spdnt[:10])
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
mmaelicke/scikit-gstat | skgstat/plotting/stvariogram_plot3d.py | 1 | 3989 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
import plotly.graph_objects as go
except ImportError:
pass
def __calculate_plot_data(stvariogram, **kwargs):
xx, yy = stvariogram.meshbins
z = stvariogram.experimental
# x = xx.flatten()
# y = yy.flatten()
# apply the model
nx = kwargs.get('x_resolution', 100)
nt = kwargs.get('t_resolution', 100)
# model spacing
_xx, _yy = np.mgrid[
0:np.nanmax(stvariogram.xbins):nx * 1j,
0:np.nanmax(stvariogram.tbins):nt * 1j
]
model = stvariogram.fitted_model
lags = np.vstack((_xx.flatten(), _yy.flatten())).T
# apply the model
_z = model(lags)
return xx.T, yy.T, z, _xx, _yy, _z
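def _example_stvariogram_plot():
    """Illustrative sketch (not part of the original module): these helpers are
    normally reached through a fitted space-time variogram that exposes the
    meshbins, experimental and fitted_model attributes used above. The random
    data and the SpaceTimeVariogram call below are assumptions made purely for
    illustration.
    """
    from skgstat import SpaceTimeVariogram
    coords = np.random.rand(50, 2) * 100   # hypothetical sample locations
    values = np.random.rand(50, 20)        # hypothetical space-time observations
    stv = SpaceTimeVariogram(coords, values)
    return matplotlib_plot_3d(stv, kind='scatter')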
def matplotlib_plot_3d(stvariogram, kind='scatter', ax=None, elev=30, azim=220, **kwargs):
# get the data, spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
x = xx.flatten()
y = yy.flatten()
# some settings
c = kwargs.get('color', kwargs.get('c', 'b'))
cmap = kwargs.get('model_color', kwargs.get('cmap', 'terrain'))
alpha = kwargs.get('alpha', 0.8)
depthshade = kwargs.get('depthshade', False)
# handle the axes
if ax is not None:
if not isinstance(ax, Axes3D):
            raise ValueError('The passed ax object is not an instance of mpl_toolkits.mplot3d.Axes3D.')
fig = ax.get_figure()
else:
fig = plt.figure(figsize=kwargs.get('figsize', (10, 10)))
ax = fig.add_subplot(111, projection='3d')
# do the plot
ax.view_init(elev=elev, azim=azim)
if kind == 'surf':
ax.plot_trisurf(x, y, z, color=c, alpha=alpha)
elif kind == 'scatter':
ax.scatter(x, y, z, c=c, depthshade=depthshade)
else:
raise ValueError('%s is not a valid 3D plot' % kind)
# add the model
if not kwargs.get('no_model', False):
ax.plot_trisurf(_xx.flatten(), _yy.flatten(), _z, cmap=cmap, alpha=alpha)
# labels:
ax.set_xlabel('space')
ax.set_ylabel('time')
ax.set_zlabel('semivariance [%s]' % stvariogram.estimator.__name__)
# return
return fig
def plotly_plot_3d(stvariogram, kind='scatter', fig=None, **kwargs):
# get the data spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
# get some settings
c = kwargs.get('color', kwargs.get('c', 'black'))
cmap = kwargs.get('model_color', kwargs.get('colorscale', kwargs.get('cmap', 'Electric')))
alpha = kwargs.get('opacity', kwargs.get('alpha', 0.6))
    # handle the figure
if fig is None:
fig = go.Figure()
# do the plot
if kind == 'surf':
fig.add_trace(
go.Surface(
x=xx,
y=yy,
z=z.reshape(xx.shape),
opacity=0.8 * alpha,
colorscale=[[0, c], [1, c]],
name='experimental variogram'
)
)
elif kind == 'scatter' or kwargs.get('add_points', False):
fig.add_trace(
go.Scatter3d(
x=xx.flatten(),
y=yy.flatten(),
z=z,
mode='markers',
opacity=alpha,
marker=dict(color=c, size=kwargs.get('size', 4)),
name='experimental variogram'
)
)
# add the model
if not kwargs.get('no_model', False):
fig.add_trace(
go.Surface(
x=_xx,
y=_yy,
z=_z.reshape(_xx.shape),
                opacity=min(1, alpha * 1.2),  # slightly more opaque than the points, capped at 1
colorscale=cmap,
name='%s model' % stvariogram.model.__name__
)
)
# set some labels
fig.update_layout(scene=dict(
xaxis_title='space',
yaxis_title='time',
zaxis_title='semivariance [%s]' % stvariogram.estimator.__name__
))
# return
return fig
| mit |
mjvakili/ccppabc | ccppabc/code/test_data.py | 1 | 4243 | '''
Test the data.py module
'''
import numpy as np
import matplotlib.pyplot as plt
import util
import data as Data
# --- Halotools ---
from halotools.empirical_models import PrebuiltHodModelFactory
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
def PlotCovariance(obvs, Mr=21, b_normal=0.25, inference='mcmc'):
''' Plot the covariance matrix for a specified obvs
'''
# import the covariance matrix
covar = Data.data_cov(Mr=Mr, b_normal=b_normal, inference=inference)
if obvs == 'xi':
obvs_cov = covar[1:16 , 1:16]
r_bin = Data.xi_binedges()
elif obvs == 'gmf':
obvs_cov = covar[17:, 17:]
binedges = Data.data_gmf_bins()
r_bin = 0.5 * (binedges[:-1] + binedges[1:])
n_bin = int(np.sqrt(obvs_cov.size))
# calculate the reduced covariance for plotting
red_covar = np.zeros([n_bin, n_bin])
for ii in range(n_bin):
for jj in range(n_bin):
red_covar[ii][jj] = obvs_cov[ii][jj]/np.sqrt(obvs_cov[ii][ii] * obvs_cov[jj][jj])
prettyplot()
fig = plt.figure()
sub = fig.add_subplot(111)
cont = sub.pcolormesh(r_bin, r_bin, red_covar, cmap=plt.cm.afmhot_r)
plt.colorbar(cont)
sub.set_xlim([r_bin[0], r_bin[-1]])
sub.set_ylim([r_bin[0], r_bin[-1]])
sub.set_xscale('log')
sub.set_yscale('log')
sub.set_xlabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
sub.set_ylabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
fig_file = ''.join([util.fig_dir(),
obvs.upper(), 'covariance',
'.Mr', str(Mr),
'.bnorm', str(round(b_normal,2)),
'.', inference, '_inf.png'])
fig.savefig(fig_file, bbox_inches='tight')
plt.close()
return None
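def _example_reduced_covariance():
    ''' Illustrative sketch (not part of the original module): the nested loop in
    PlotCovariance builds the reduced covariance (i.e. correlation) matrix
    C_ij / sqrt(C_ii * C_jj). The toy data below is made up; the same result can
    be obtained in vectorized form.
    '''
    toy_cov = np.cov(np.random.randn(5, 20))
    diag = np.sqrt(np.diag(toy_cov))
    red_cov = toy_cov / np.outer(diag, diag)
    return red_cov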
# ---- Plotting ----
def xi(Mr=20, Nmock=500):
'''
Plot xi(r) of the fake observations
'''
prettyplot()
pretty_colors = prettycolors()
xir, cii = Data.data_xi(Mr=Mr, Nmock=Nmock)
rbin = Data.data_xi_bins(Mr=Mr)
fig = plt.figure(1)
sub = fig.add_subplot(111)
sub.plot(rbin, rbin*xir, c='k', lw=1)
sub.errorbar(rbin, rbin*xir, yerr = rbin*cii**0.5 , fmt="ok", ms=1, capsize=2, alpha=1.)
sub.set_xlim([0.1, 15])
sub.set_ylim([1, 10])
sub.set_yscale("log")
sub.set_xscale("log")
sub.set_xlabel(r'$\mathtt{r}\; (\mathtt{Mpc})$', fontsize=25)
sub.set_ylabel(r'$\mathtt{r} \xi_{\rm gg}$', fontsize=25)
fig_file = ''.join([util.fig_dir(),
'xi.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
fig.savefig(fig_file, bbox_inches='tight')
plt.close()
return None
def gmf(Mr=20, Nmock=500):
'''
    Plot Group Multiplicity Function of fake observations
'''
prettyplot()
pretty_colors = prettycolors()
# import fake obs GMF
gmf, sig_gmf = Data.data_gmf(Mr=Mr, Nmock=Nmock)
# group richness bins
gmf_bin = Data.data_gmf_bins()
fig = plt.figure(1)
sub = fig.add_subplot(111)
sub.errorbar(
0.5*(gmf_bin[:-1]+gmf_bin[1:]), gmf, yerr=sig_gmf,
fmt="ok", capsize=1.0
)
sub.set_xlim([1, 60])
sub.set_yscale('log')
sub.set_ylabel(r"Group Multiplicity Function (h$^{3}$ Mpc$^{-3}$)", fontsize=20)
sub.set_xlabel(r"$\mathtt{Group\;\;Richness}$", fontsize=20)
# save to file
fig_file = ''.join([util.fig_dir(),
'gmf.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
fig.savefig(fig_file, bbox_inches='tight')
return None
# ---- tests -----
def xi_binning_tests(Mr=20):
model = PrebuiltHodModelFactory('zheng07', threshold = -1.0*np.float(Mr))
rbins = np.concatenate([np.array([0.1]), np.logspace(np.log10(0.5), np.log10(20.), 15)])
print 'R bins = ', rbins
for ii in xrange(10):
model.populate_mock() # population mock realization
#rbins = np.logspace(-1, np.log10(20.), 16)
r_bin, xi_r = model.mock.compute_galaxy_clustering(rbins=rbins)
print xi_r
def test_nbar(Mr=21, b_normal=0.25):
print Data.data_nbar(Mr=Mr, b_normal=b_normal)
if __name__=='__main__':
PlotCovariance('gmf', inference='mcmc')
#test_nbar()
#xi_cov(Mr=20, Nmock=500)
#xi_binning_tests(Mr=20)
| mit |
Andreea-G/Codds_DarkMatter | src/experiment_HaloIndep_Band.py | 1 | 59260 | """
Copyright (c) 2015 Andreea Georgescu
Created on Wed Mar 4 00:47:37 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO! This only works for CDMSSi!
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from experiment_HaloIndep import *
import interp_uniform as unif
# from interp import interp1d
from scipy import interpolate
from scipy.optimize import brentq, minimize
from basinhopping import *
import matplotlib.pyplot as plt
import os # for speaking
import parallel_map as par
DEBUG = F
DEBUG_FULL = F
USE_BASINHOPPING = T
ADAPT_KWARGS = F
ALLOW_MOVE = T
class ConstraintsFunction(object):
""" Class to implement the constraints function that will be passed as an argunent
to the minimization routines.
Input:
args: Arguments needed for calculating the constraints:
vminStar, logetaStar, vminStar_index
"""
def __init__(self, *args):
self.vminStar = args[0]
self.logetaStar = args[1]
self.vminStar_index = args[2]
self.vmin_max = 2000
def __call__(self, x, close=True):
"""
Input:
x: ndarray
Returns:
constraints: ndarray
Constraints vector, where each value must be >= 0 for the
constraint to be specified. Contains:
0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
13 - 15: vminStar_index: x.size/2 constraints = 3 for x.size/2 = 3
16 - 18: vminStar and logetaStar: x.size/2 constraints = 3 for x.size/2 = 3
"""
constraints = np.concatenate([x[:x.size/2], self.vmin_max - x[:x.size/2], -x[x.size/2:],
np.diff(x[:x.size/2]), np.diff(-x[x.size/2:]),
(x[:x.size/2] - self.vminStar) * (-x[x.size/2:] + self.logetaStar),
self.vminStar - x[:self.vminStar_index],
x[self.vminStar_index: x.size/2] - self.vminStar,
x[x.size/2: x.size/2 + self.vminStar_index] - self.logetaStar,
self.logetaStar - x[x.size/2 + self.vminStar_index:]])
if close:
is_not_close = np.logical_not(np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
is_not_close[:3 * (x.size/2)] = True
constraints = np.where(is_not_close, constraints, np.abs(constraints))
if np.any(np.isnan(constraints)):
raise ValueError
return constraints
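def _example_constraints_function():
    """ Illustrative sketch (not part of the original module): evaluate the
    constraints vector for a toy step function with 3 (vmin, logeta) steps and
    a constrained point (vminStar, logetaStar) = (500, -24) inserted at index 1.
    The numbers are arbitrary, and the float division used for slicing assumes
    the legacy NumPy behaviour this module relies on throughout.
    """
    constr = ConstraintsFunction(500., -24., 1)
    x = np.array([300., 700., 900., -23., -25., -26.])
    return constr(x)  # all entries >= 0 here, so the constraints are satisfied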
class Experiment_EHI(Experiment_HaloIndep):
""" Class implementing the extended maximum likelihood halo-independent (EHI)
method to obtain the confidence band for experiments with potential signals and
unbinned data (arXiv:1507.03902).
Input:
expername: string
The name of the experiment.
scattering_type: string
The type of scattering. Can be
- 'SI' (spin-independent)
- 'SDAV' (spin-dependent, axial-vector)
- 'SDPS' (spin-dependent, pseudo-scalar)
mPhi: float, optional
The mass of the mediator.
method: str, optional
Type of minimization solver to be passed as a parameter to the minimization
routine. Can be 'SLSQP' or 'COBYLA'.
"""
def __init__(self, expername, scattering_type, mPhi=mPhiRef, method='SLSQP'):
super().__init__(expername, scattering_type, mPhi)
module = import_file(INPUT_DIR + expername + ".py")
self.ERecoilList = module.ERecoilList
self.mu_BKG_i = module.mu_BKG_i
self.NBKG = module.NBKG
self.method = method
def _VMinSortedList(self, mx, fp, fn, delta):
""" Computes the list of vmin corresponsing to measured recoil energies,
sorted in increasing order. Will be useful as starting guesses.
"""
self.vmin_sorted_list = np.sort(VMin(self.ERecoilList, self.mT[0], mx, delta))
return
def ResponseTables(self, vmin_min, vmin_max, vmin_step, mx, fp, fn, delta,
output_file_tail):
""" Computes response tables
        - self.diff_response_tab is a table of DifferentialResponse(Eee_i) values
        for each vmin in the range [vmin_min, vmin_max], corresponding to measured
        recoil energies Eee_i. It is a 2D matrix where
        axis = 0 has dimension self.ERecoilList.size()
        axis = 1 has dimension self.vmin_linspace.size() (a column of zeros is
        prepended for vmin = 0); the corresponding vmin values are stored in
        self.vmin_linspace.
        - self.response_tab is the corresponding table of Response values, i.e.
        DifferentialResponse integrated over the full energy range. It is a 1D
        array with one entry per vmin in self.vmin_linspace (again with a zero
        prepended for vmin = 0).
Input:
vmin_min, vmin_max, vmin_step: float
Vmin range and vmin step size.
mx, fp, fn, delta: float
output_file_tail: string
Tag to be added to the file name since the results for
self.vmin_sorted_list, self.diff_response_tab and self.response_tab
are each written to files.
"""
self._VMinSortedList(mx, fp, fn, delta)
file = output_file_tail + "_VminSortedList.dat"
print(file)
np.savetxt(file, self.vmin_sorted_list)
if delta == 0:
branches = [1]
else:
branches = [1, -1]
self.vmin_linspace = np.linspace(vmin_min, vmin_max,
(vmin_max - vmin_min)/vmin_step + 1)
self.diff_response_tab = np.zeros((self.ERecoilList.size, 1))
self.response_tab = np.zeros(1)
self.curly_H_tab = np.zeros((self.ERecoilList.size, 1))
self.xi_tab = np.zeros(1)
xi = 0
vmin_prev = 0
for vmin in self.vmin_linspace:
print("vmin =", vmin)
diff_resp_list = np.zeros((1, len(self.ERecoilList)))
resp = 0
curly_H = np.zeros((1, len(self.ERecoilList)))
for sign in branches:
(ER, qER, const_factor) = self.ConstFactor(vmin, mx, fp, fn, delta, sign)
v_delta = min(VminDelta(self.mT, mx, delta))
diff_resp_list += np.array([self.DifferentialResponse(Eee, qER, const_factor)
for Eee in self.ERecoilList])
resp += integrate.quad(self.DifferentialResponse, self.Ethreshold, self.Emaximum,
args=(qER, const_factor), epsrel=PRECISSION, epsabs=0)[0]
curly_H += np.array([[integrate.quad(self.DifferentialResponse_Full, v_delta, vmin,
args=(Eee, mx, fp, fn, delta, sign),
epsrel=PRECISSION, epsabs=0)[0]
for Eee in self.ERecoilList]])
xi += self.Exposure * \
self.IntegratedResponse(vmin_prev, vmin,
self.Ethreshold, self.Emaximum,
mx, fp, fn, delta)
vmin_prev = vmin
self.diff_response_tab = \
np.append(self.diff_response_tab, diff_resp_list.transpose(), axis=1)
self.response_tab = np.append(self.response_tab, [resp], axis=0)
self.curly_H_tab = np.append(self.curly_H_tab, curly_H.transpose(), axis=1)
# counts/kg/keVee
self.xi_tab = np.append(self.xi_tab, [xi], axis=0)
# counts * day
self.vmin_linspace = np.insert(self.vmin_linspace, 0., 0)
file = output_file_tail + "_VminLinspace.dat"
print(file)
np.savetxt(file, self.vmin_linspace)
file = output_file_tail + "_DiffRespTable.dat"
print(file)
np.savetxt(file, self.diff_response_tab)
file = output_file_tail + "_RespTable.dat"
print(file)
np.savetxt(file, self.response_tab)
file = output_file_tail + "_CurlyHTable.dat"
print(file)
np.savetxt(file, self.curly_H_tab)
file = output_file_tail + "_XiTable.dat"
print(file)
np.savetxt(file, self.xi_tab)
os.system("say Finished response tables.")
return
def PlotTable(self, func, dimension=0, xlim=None, ylim=None,
title=None, plot_close=True, plot_show=True, show_zero_axis=False):
""" Plots response tables.
Input:
func: callable
Function or list of functions of v that should be plotted.
dimension: int
0 (if there's only one function) or
1 (if there are a list of functions).
xlim, ylim: float
Axis limits for the plots.
title: string
Plot title.
plot_close, plot_show: bool
Whether to call plt.close() before and plt.show() after.
show_zero_axis: bool
Whether to show a horizontal line at zero.
"""
if plot_close:
plt.close()
if dimension == 0:
# only one function
plt.plot(self.vmin_linspace, np.array([func(v)
for v in self.vmin_linspace]))
elif dimension == 1:
# list of interpolated functions for each energy in self.ERecoilList
for i in range(self.ERecoilList.size):
plt.plot(self.vmin_linspace, np.array([func[i](v)
for v in self.vmin_linspace]))
else:
print("Wrong dimension")
raise TypeError
if show_zero_axis:
plt.plot(self.vmin_linspace, np.zeros(self.vmin_linspace.size))
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if title is not None:
plt.title(title)
if plot_show:
plt.show()
def ImportResponseTables(self, output_file_tail, plot=True):
""" Imports the data for the response tables from files.
"""
file = output_file_tail + "_VminSortedList.dat"
with open(file, 'r') as f_handle:
self.vmin_sorted_list = np.loadtxt(f_handle)
file = output_file_tail + "_VminLinspace.dat"
with open(file, 'r') as f_handle:
self.vmin_linspace = np.loadtxt(f_handle)
file = output_file_tail + "_DiffRespTable.dat"
with open(file, 'r') as f_handle:
self.diff_response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_RespTable.dat"
with open(file, 'r') as f_handle:
self.response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_CurlyHTable.dat"
with open(file, 'r') as f_handle:
self.curly_H_tab = np.loadtxt(f_handle)
file = output_file_tail + "_XiTable.dat"
with open(file, 'r') as f_handle:
self.xi_tab = np.loadtxt(f_handle)
self.diff_response_interp = np.array([unif.interp1d(self.vmin_linspace, dr)
for dr in self.diff_response_tab])
self.response_interp = unif.interp1d(self.vmin_linspace, self.response_tab)
self.curly_H_interp = np.array([unif.interp1d(self.vmin_linspace, h)
for h in self.curly_H_tab])
if plot:
self.PlotTable(self.diff_response_interp, dimension=1)
self.PlotTable(self.response_interp, dimension=0)
self.PlotTable(self.curly_H_interp, dimension=1, title='Curly H')
return
def VminIntegratedResponseTable(self, vmin_list):
return np.array([[integrate.quad(self.diff_response_interp[i],
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)]
for i in range(self.ERecoilList.size)])
def IntegratedResponseTable(self, vmin_list):
return np.array([integrate.quad(self.response_interp,
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)])
def _MinusLogLikelihood(self, vars_list, vminStar=None, logetaStar=None,
vminStar_index=None):
""" Compute -log(L)
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)]
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
Returns:
-log(L): float
"""
if vminStar is None:
vmin_list_w0 = vars_list[: vars_list.size/2]
logeta_list = vars_list[vars_list.size/2:]
else:
vmin_list_w0 = np.insert(vars_list[: vars_list.size/2],
vminStar_index, vminStar)
logeta_list = np.insert(vars_list[vars_list.size/2:],
vminStar_index, logetaStar)
vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
vmin_resp_integr = self.VminIntegratedResponseTable(vmin_list_w0)
resp_integr = self.IntegratedResponseTable(vmin_list_w0)
mu_i = self.Exposure * np.dot(vmin_resp_integr, 10**logeta_list)
Nsignal = self.Exposure * np.dot(10**logeta_list, resp_integr)
if vminStar is None:
self.gamma_i = (self.mu_BKG_i + mu_i) / self.Exposure
# counts/kg/keVee/days
result = self.NBKG + Nsignal - np.log(self.mu_BKG_i + mu_i).sum()
if np.any(self.mu_BKG_i + mu_i < 0):
raise ValueError
return result
def MinusLogLikelihood(self, vars_list, constr_func=None, vminStar=None,
logetaStar=None, vminStar_index=None):
""" Computes -log(L) and tests whether constraints are satisfied.
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)].
constr_func: callable, optional
                Function of vars_list giving an array of values, each corresponding to
a constraint. If the values are > 0 the constraints are satisfied.
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
vminStar_index: int, optional
Index corresponding to the position of vminStar in the array of vmin
steps.
Returns:
-log(L) if all constraints are valid, and the result of an artificial
function that grows with the invalid constraints if not all constraints
are valid.
"""
constraints = constr_func(vars_list)
constr_not_valid = constraints < 0
if DEBUG_FULL:
print("*** vars_list =", repr(vars_list))
if DEBUG_FULL:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("constraints =", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
try:
return self._MinusLogLikelihood(vars_list, vminStar=vminStar,
logetaStar=logetaStar,
vminStar_index=vminStar_index)
except:
if np.any(constr_not_valid):
constr_list = constraints[constr_not_valid]
if DEBUG_FULL:
print("Constraints not valid!!")
print("constr sum =", -constr_list.sum())
return min(max(-constr_list.sum(), 0.001) * 1e6, 1e6)
else:
print("Error!!")
raise
def OptimalLikelihood(self, output_file_tail, logeta_guess):
""" Finds the best-fit piecewise constant eta function corresponding to the
minimum MinusLogLikelihood, and prints the results to file (value of the minimum
        MinusLogLikelihood and the corresponding values of vmin, logeta steps).
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_guess: float
Guess for the value of log(eta) in the minimization procedure.
"""
self.ImportResponseTables(output_file_tail, plot=False)
vars_guess = np.append(self.vmin_sorted_list,
logeta_guess * np.ones(self.vmin_sorted_list.size))
print("vars_guess =", vars_guess)
vmin_max = self.vmin_linspace[-1]
def constr_func(x, vmin_max=vmin_max):
""" 0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
"""
constraints = np.concatenate([x[:x.size/2], vmin_max - x[:x.size/2],
-x[x.size/2:],
np.diff(x[:x.size/2]), np.diff(-x[x.size/2:])])
is_not_close = np.logical_not(
np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
is_not_close[:3 * (x.size/2)] = T
constr = np.where(is_not_close, constraints, np.abs(constraints))
if DEBUG:
print("***constr =", repr(constr))
print("tf =", repr(constr < 0))
return constr
constr = ({'type': 'ineq', 'fun': constr_func})
np.random.seed(0)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": (constr_func,)}
optimum_log_likelihood = basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs,
niter=30, stepsize=0.1)
else:
optimum_log_likelihood = minimize(self.MinusLogLikelihood, vars_guess,
args=(constr_func,), constraints=constr)
print(optimum_log_likelihood)
print("MinusLogLikelihood =", self._MinusLogLikelihood(optimum_log_likelihood.x))
print("vars_guess =", repr(vars_guess))
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
print(file)
np.savetxt(file, np.append([optimum_log_likelihood.fun],
optimum_log_likelihood.x))
os.system("say 'Finished finding optimum'")
return
def ImportOptimalLikelihood(self, output_file_tail, plot=False):
""" Import the minumum -log(L) and the locations of the steps in the best-fit
logeta function.
Input:
output_file_tail: string
Tag to be added to the file name.
plot: bool, optional
Whether to plot response tables.
"""
self.ImportResponseTables(output_file_tail, plot=False)
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
with open(file, 'r') as f_handle:
optimal_result = np.loadtxt(f_handle)
self.optimal_logL = optimal_result[0]
self.optimal_vmin = optimal_result[1: optimal_result.size/2 + 1]
self.optimal_logeta = optimal_result[optimal_result.size/2 + 1:]
print("optimal result =", optimal_result)
if plot:
self._MinusLogLikelihood(optimal_result[1:]) # to get self.gamma_i
self.xi_interp = unif.interp1d(self.vmin_linspace, self.xi_tab)
self.h_sum_tab = np.sum([self.curly_H_tab[i] / self.gamma_i[i]
for i in range(self.optimal_vmin.size)], axis=0)
self.q_tab = 2 * (self.xi_tab - self.h_sum_tab)
self.h_sum_interp = unif.interp1d(self.vmin_linspace, self.h_sum_tab)
self.q_interp = unif.interp1d(self.vmin_linspace, self.q_tab)
file = output_file_tail + "_HSumTable.dat"
print(file)
np.savetxt(file, self.h_sum_tab)
file = output_file_tail + "_QTable.dat"
print(file)
np.savetxt(file, self.q_tab)
self.PlotTable(self.xi_interp, dimension=0, plot_show=False)
self.PlotTable(self.h_sum_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='Xi, H_sum', plot_close=False)
self.PlotTable(self.q_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='q', show_zero_axis=True)
return
def _PlotStepFunction(self, vmin_list, logeta_list,
xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
mark=None, color=None, linewidth=1,
plot_close=True, plot_show=True):
""" Plots a step-like function, given the location of the steps.
"""
if plot_close:
plt.close()
print(vmin_list)
print(logeta_list)
x = np.append(np.insert(vmin_list, 0, 0), vmin_list[-1] + 0.1)
y = np.append(np.insert(logeta_list, 0, logeta_list[0]), -80)
if color is not None:
plt.step(x, y, color=color, linewidth=linewidth)
if mark is not None:
plt.plot(x, y, mark, color=color)
else:
plt.step(x, y, linewidth=linewidth)
if mark is not None:
plt.plot(x, y, mark)
# plt.xlim([vmin_list[0] * xlim_percentage[0], vmin_list[-1] * xlim_percentage[1]])
plt.xlim([0, 1000])
plt.ylim([max(logeta_list[-1] * ylim_percentage[0], -60),
max(logeta_list[0] * ylim_percentage[1], -35)])
if plot_show:
plt.show()
return
def PlotOptimum(self, xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
color='red', linewidth=1,
plot_close=True, plot_show=True):
""" Plots the best-fit eta(vmin) step function.
"""
self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
xlim_percentage=xlim_percentage,
ylim_percentage=ylim_percentage,
color=color, linewidth=linewidth,
plot_close=plot_close, plot_show=plot_show)
return
def PlotConstrainedOptimum(self, vminStar, logetaStar, vminStar_index,
xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
plot_close=True, plot_show=True):
""" Plots the eta(vmin) function given the location of vminStar and logetaStar.
"""
self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
plot_close=plot_close, plot_show=False)
x = np.insert(self.constr_optimal_vmin, vminStar_index, vminStar)
y = np.insert(self.constr_optimal_logeta, vminStar_index, logetaStar)
self._PlotStepFunction(x, y,
xlim_percentage=xlim_percentage,
ylim_percentage=ylim_percentage,
plot_close=False, plot_show=False, mark='x', color='k')
plt.plot(vminStar, logetaStar, '*')
if plot_show:
plt.show()
return
def _ConstrainedOptimalLikelihood(self, vminStar, logetaStar, vminStar_index):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar and vminStar_index.
Input:
vminStar, logetaStar: float
Location of the constrained step.
vminStar_index: int
Index of vminStar in the list of vmin steps of the constrained optimum
logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
if DEBUG:
print("~~~~~ vminStar_index =", vminStar_index)
vmin_guess_left = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] < vminStar
else vminStar * (1 - 0.001*(vminStar_index - ind))
for ind in range(vminStar_index)])
vmin_guess_right = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] > vminStar
else vminStar * (1 + 0.001*(ind - vminStar_index - 1))
for ind in range(vminStar_index, self.optimal_vmin.size)])
vmin_guess = np.append(vmin_guess_left, vmin_guess_right)
logeta_guess = self.optimal_logeta
logeta_guess_left = np.maximum(logeta_guess[:vminStar_index],
np.ones(vminStar_index)*logetaStar)
logeta_guess_right = np.minimum(logeta_guess[vminStar_index:],
np.ones(logeta_guess.size - vminStar_index) *
logetaStar)
logeta_guess = np.append(logeta_guess_left, logeta_guess_right)
vars_guess = np.append(vmin_guess, logeta_guess)
constr_func = ConstraintsFunction(vminStar, logetaStar, vminStar_index)
constr = ({'type': 'ineq', 'fun': constr_func})
args = (constr_func, vminStar, logetaStar, vminStar_index)
sol_not_found = True
attempts = 3
np.random.seed(1)
random_variation = 1e-5
if USE_BASINHOPPING:
class TakeStep(object):
def __init__(self, stepsize=0.1):
                    self.stepsize = stepsize
def __call__(self, x):
x[:x.size/2] += np.random.uniform(-5. * self.stepsize,
5. * self.stepsize,
x[x.size/2:].shape)
x[x.size/2:] += np.random.uniform(-self.stepsize,
self.stepsize, x[x.size/2:].shape)
return x
take_step = TakeStep()
class AdaptiveKwargs(object):
def __init__(self, kwargs, random_variation=random_variation):
self.kwargs = kwargs
self.random_variation = random_variation
def __call__(self):
new_kwargs = {}
random_factor_vminStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
random_factor_logetaStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
constr_func_args = (self.kwargs['args'][1] * random_factor_vminStar,
self.kwargs['args'][2] * random_factor_logetaStar,
self.kwargs['args'][3])
constr_func = ConstraintsFunction(*constr_func_args)
new_kwargs['args'] = (constr_func,) + constr_func_args
new_kwargs['constraints'] = ({'type': 'ineq', 'fun': constr_func})
if 'method' in self.kwargs:
new_kwargs['method'] = self.kwargs['method']
return new_kwargs
minimizer_kwargs = {"constraints": constr, "args": args, "method": self.method}
if ADAPT_KWARGS:
adapt_kwargs = AdaptiveKwargs(minimizer_kwargs, random_variation)
else:
adapt_kwargs = None
while sol_not_found and attempts > 0:
try:
if USE_BASINHOPPING:
constr_optimum_log_likelihood = \
basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs, niter=5,
take_step=take_step, adapt_kwargs=adapt_kwargs,
stepsize=0.2)
else:
constr_optimum_log_likelihood = \
minimize(self.MinusLogLikelihood, vars_guess,
args=args, constraints=constr, method=self.method)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
except ValueError:
sol_not_found = True
pass
attempts -= 1
args = (constr_func,
vminStar * (1 + random_variation * np.random.uniform(-1, 1)),
logetaStar * (1 + random_variation * np.random.uniform(-1, 1)),
vminStar_index)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": args}
if DEBUG and sol_not_found:
print(attempts, "attempts left! ####################################" +
"################################################################")
print("sol_not_found =", sol_not_found)
if sol_not_found:
if DEBUG:
print("ValueError: sol not found")
raise ValueError
if DEBUG:
print(constr_optimum_log_likelihood)
print("kwargs =", constr_optimum_log_likelihood.minimizer.kwargs)
print("args =", constr_optimum_log_likelihood.minimizer.kwargs['args'])
print("optimum_logL =", self.optimal_logL)
print("constraints=", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
print("vars_guess =", repr(vars_guess))
print("optimum_logL =", self.optimal_logL)
print("vminStar_index =", vminStar_index)
return constr_optimum_log_likelihood
def ConstrainedOptimalLikelihood(self, vminStar, logetaStar, plot=False):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar. Finds the minimum for all vminStar_index, and picks the best one.
Input:
vminStar, logetaStar: float
Location of constrained step.
plot: bool, optional
Whether to plot the constrained piecewice-constant logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
vminStar_index = 0
while vminStar_index < self.optimal_vmin.size and \
vminStar > self.optimal_vmin[vminStar_index]:
vminStar_index += 1
try:
constr_optimum_log_likelihood = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, vminStar_index)
except ValueError:
optim_logL = 10**6
pass
else:
optim_logL = constr_optimum_log_likelihood.fun
original_optimum = constr_optimum_log_likelihood
vminStar_index_original = vminStar_index
index = vminStar_index
while ALLOW_MOVE and index > 0:
try:
index -= 1
new_optimum = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved left")
print("Moved left, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
index = vminStar_index_original
while ALLOW_MOVE and index < self.optimal_vmin.size:
try:
index += 1
new_optimum = self._ConstrainedOptimalLikelihood(vminStar, logetaStar,
index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved right")
print("Moved right, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
if optim_logL == 10**6:
raise ValueError
self.constr_optimal_logl = constr_optimum_log_likelihood.fun
vars_result = constr_optimum_log_likelihood.x
self.constr_optimal_vmin = vars_result[: vars_result.size/2]
self.constr_optimal_logeta = vars_result[vars_result.size/2:]
if plot:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("vminStar_index =", vminStar_index)
try:
print("original:", original_optimum)
except:
print("Original failed.")
pass
try:
print("new:", constr_optimum_log_likelihood)
print(constr_optimum_log_likelihood.minimizer.kwargs['args'])
except:
print("All attepts failed.")
pass
try:
vminStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][1]
logetaStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][2]
constr_func = ConstraintsFunction(vminStar_rand, logetaStar_rand,
vminStar_index)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
print("random vminStar =", vminStar_rand)
print("random logetaStar =", logetaStar_rand)
print("x =", constr_optimum_log_likelihood.x)
print("constraints =", constraints)
print("is_not_close =", is_not_close)
print("constr_not_valid =", constr_not_valid)
print("sol_not_found =", sol_not_found)
except:
print("Error")
pass
os.system("say 'Finished plot'")
self.PlotConstrainedOptimum(vminStar_rand, logetaStar_rand, vminStar_index,
xlim_percentage=(0., 1.1),
ylim_percentage=(1.2, 0.8))
return self.constr_optimal_logl
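# A minimal usage sketch (the instance name `run`, the file tag and the numbers
# below are placeholders, not values from a specific analysis):
#
#   run.ImportOptimalLikelihood("_example_tail")   # load the unconstrained optimum
#   logl = run.ConstrainedOptimalLikelihood(vminStar=500., logetaStar=-24., plot=False)
#   # logl is the constrained minimum of -log(L); comparing it with run.optimal_logL
#   # gives the Delta(log L) used later when building the confidence band.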
def VminSamplingList(self, output_file_tail, vmin_min, vmin_max, vmin_num_steps,
steepness_vmin=1.5, steepness_vmin_center=2.5, plot=False):
""" Finds a non-linear way to sample the vmin range, such that more points are
sampled near the location of the steps of the best-fit logeta function, and
fewer in between. This is done by building a function of vmin that is steeper
near the steps and flatter elsewhere, and the steeper this function the more
samplings are done in this region.
Input:
output_file_tail: string
Tag to be added to the file name.
vmin_min, vmin_max: float
Range in vmin where the sampling should be made.
vmin_num_steps: int
Number of samples in vmin (approximate; the final number of steps is
not exact, due to taking floor() in some places).
steepness_vmin: float, optional
Parameter related to the steepness of this function to the left of the
leftmost step and to the right of the rightmost step.
steepness_vmin_center: float, optional
Similar parameter, but for the steepness in between the leftmost step
and the rightmost step.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
self.ImportOptimalLikelihood(output_file_tail)
xmin = vmin_min
xmax = vmin_max
# TODO! This +4 is to compensate for a loss of ~4 points (not always 4 though),
# and it's due to taking floor later on.
# Find a better way to deal with this.
x_num_steps = vmin_num_steps # + 4
s = steepness_vmin
sc = steepness_vmin_center
x_lin = np.linspace(xmin, xmax, 1000)
x0_list = self.optimal_vmin
numx0 = x0_list.size
print("x0 =", x0_list)
def UnitStep(x): return (np.sign(x) + 1) / 2
def g1(x, x0, s0, xmin=xmin):
return np.log10(UnitStep(x - x0) +
UnitStep(x0 - x) *
(x0 - xmin) / (x + 10**s0 * (-x + x0) - xmin))
def g2(x, x0, s0, xmax=xmax):
return np.log10(UnitStep(x0 - x) +
UnitStep(x - x0) *
(x + 10**s0 * (-x + x0) - xmax) / (x0 - xmax))
def g(x, x0, s1, s2): return g1(x, x0, s1) + g2(x, x0, s2)
s_list = np.array([[s, sc]] + [[sc, sc]] * (numx0 - 2) + [[sc, s]])
def g_total(x, sign=1, x0=x0_list, s_list=s_list):
return np.array([sign * g(x, x0_list[i], s_list[i, 0], s_list[i, 1])
for i in range(x0_list.size)]).prod(axis=0)
g_lin = g_total(x_lin)
xT_guess = (x0_list[:-1] + x0_list[1:]) / 2
bounds = np.array([(x0_list[i], x0_list[i + 1])
for i in range(x0_list.size - 1)])
x_turns_max = np.array([minimize(g_total, np.array(xT_guess[i]),
args=(-1,), bounds=[bounds[i]]).x
for i in range(0, xT_guess.size, 2)])
x_turns_min = np.array([minimize(g_total, np.array(xT_guess[i]),
bounds=[bounds[i]]).x
for i in range(1, xT_guess.size, 2)])
x_turns = np.sort(np.append(x_turns_max, x_turns_min))
x_turns = np.append(np.insert(x_turns, 0, xmin), [xmax])
y_turns = g_total(x_turns)
print("x_turns =", x_turns)
print("y_turns =", y_turns)
def g_inverse(y, x1, x2):
return brentq(lambda x: g_total(x) - y, x1, x2)
def g_inverse_list(y_list, x1, x2):
return np.array([g_inverse(y, x1, x2) for y in y_list])
y_diff = np.diff(y_turns)
y_diff_sum = np.abs(y_diff).sum()
print("y_diff =", y_diff)
num_steps = np.array([max(1, np.floor(x_num_steps * np.abs(yd)/y_diff_sum))
for yd in y_diff])
print("num_steps =", num_steps)
y_list = np.array([np.linspace(y_turns[i], y_turns[i+1], num_steps[i])
for i in range(num_steps.size)])
x_list = np.array([g_inverse_list(y_list[i], x_turns[i], x_turns[i+1])
for i in range(y_list.size)])
x_list = np.concatenate(x_list)
y_list = np.concatenate(y_list)
x_list = x_list[np.array([x_list[i] != x_list[i+1]
for i in range(x_list.size - 1)] + [True])]
y_list = y_list[np.array([y_list[i] != y_list[i+1]
for i in range(y_list.size - 1)] + [True])]
self.vmin_sampling_list = x_list
if plot:
plt.close()
plt.plot(x_lin, g_lin)
plt.plot(x_turns, y_turns, 'o')
plt.plot(x_list, y_list, '*')
plt.xlim([xmin, xmax])
plt.ylim([min(-s * sc**(numx0 - 1), np.min(y_turns)),
max(s * sc**(numx0 - 1), np.max(y_turns))])
plt.show()
return
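# Standalone sketch of the sampling idea used above (illustrative only, not part
# of the class): sample y uniformly and invert a monotonic function g with
# brentq, so that the resulting x points cluster where g is steep.
#
#   import numpy as np
#   from scipy.optimize import brentq
#   g = lambda x: np.arctan(10 * (x - 0.5))            # steep near x = 0.5
#   y_samples = np.linspace(g(0.), g(1.), 21)
#   x_samples = [brentq(lambda x, y=y: g(x) - y, 0., 1.) for y in y_samples]
#   # x_samples are densest around x = 0.5, mirroring how VminSamplingList
#   # concentrates vmin points near the steps of the best-fit logeta function.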
def OptimumStepFunction(self, vmin):
""" Best-fit logeta as a function of vmin for the optimal log(L).
Input:
vmin: float
Value of vmin for which to evaluate logeta.
Returns:
logeta: float
log(eta(vmin)) for the best-fit piecewise constant function.
"""
index = 0
while index < self.optimal_vmin.size and vmin > self.optimal_vmin[index]:
index += 1
if index == self.optimal_vmin.size:
return self.optimal_logeta[-1]*10
return self.optimal_logeta[index]
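# Equivalent vectorizable lookup (a sketch, assuming self.optimal_vmin is sorted
# in increasing order, which the step-function construction implies):
#
#   idx = np.searchsorted(self.optimal_vmin, vmin, side='left')
#   logeta = (self.optimal_logeta[idx] if idx < self.optimal_vmin.size
#             else self.optimal_logeta[-1] * 10)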
def VminLogetaSamplingTable(self, output_file_tail, logeta_percent_minus,
logeta_percent_plus, logeta_num_steps,
linear_sampling=True, steepness_logeta=1, plot=False):
""" Finds a non-linear way to sample both the vmin and logeta range, such that
more points are sampled near the location of the steps of the best-fit logeta
function, and fewer in between. This uses the sampling in vmin done by
VminSamplingList, and computes a non-linear sampling in logeta in a similar way
(by building a function of logeta that is steeper near the steps and flatter
elsewhere, and the steeper this function the more samplings are done in this
region).
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_percent_minus, logeta_percent_plus: float
Range in logeta where the sampling should be made, given as percentage
in the negative and positive direction of the best-fit logeta.
logeta_num_steps: int
Number of samples in logeta.
linear_sampling: bool, optional
Whether to sample logeta linearly between the computed bounds instead of
using the non-linear, steepness-based sampling.
steepness_logeta: float, optional
Parameter related to the steepness of this sampling function in logeta.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
print(self.optimal_vmin)
print(self.optimal_logeta)
logeta_num_steps_minus = logeta_num_steps * \
logeta_percent_minus / (logeta_percent_minus + logeta_percent_plus)
logeta_num_steps_plus = logeta_num_steps * \
logeta_percent_plus / (logeta_percent_minus + logeta_percent_plus)
s = steepness_logeta
def f(x, xm, i, s0=s):
return (xm - x) / (10**s0 - 1) * 10**i + (10**s0 * x - xm) / (10**s0 - 1)
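# Note on f: for any steepness s0, f(x, xm, 0) == x and f(x, xm, s0) == xm, so
# sweeping i uniformly between 0 and s0 moves from the best-fit value x to the
# band edge xm with geometrically spaced points (denser near x, sparser near xm).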
self.vmin_logeta_sampling_table = []
vmin_last_step = self.optimal_vmin[-1]
if linear_sampling:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
if vmin < self.optimal_vmin[0]:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
else:
if vmin < 600:
logeta_min = logeta_opt * (1 + logeta_percent_minus)
else:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - 0.5 * logeta_percent_plus)
logeta_list = [[vmin, logeta]
for logeta in np.linspace(logeta_min, logeta_max,
logeta_num_steps)]
self.vmin_logeta_sampling_table += [logeta_list]
else:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
logeta_min = logeta_opt * (1 + logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
logeta_list_minus = [[vmin, f(logeta_opt, logeta_min, i)]
for i in np.linspace(s, 0, logeta_num_steps_minus)]
logeta_list_plus = [[vmin, f(logeta_opt, logeta_max, i)]
for i in np.linspace(s / logeta_num_steps_plus, s,
logeta_num_steps_plus)]
self.vmin_logeta_sampling_table += [logeta_list_minus + logeta_list_plus]
self.vmin_logeta_sampling_table = np.array(self.vmin_logeta_sampling_table)
if plot:
self.PlotSamplingTable(plot_close=True)
return
def PlotSamplingTable(self, plot_close=False, plot_show=True, plot_optimum=True):
""" Plots the sampling points in the vmin-logeta plane.
"""
if plot_close:
plt.close()
print("sampling_size =", self.vmin_logeta_sampling_table.shape)
for tab in self.vmin_logeta_sampling_table:
plt.plot(tab[:, 0], tab[:, 1], 'o')
if plot_optimum:
self.PlotOptimum(xlim_percentage=(0.9, 1.1), ylim_percentage=(1.2, 0.8),
plot_close=False, plot_show=plot_show)
elif plot_show:
plt.show()
return
def GetLikelihoodTable(self, index, output_file_tail, logeta_index_range, extra_tail):
""" Prints to file lists of the form [logetaStar_ij, logL_ij] needed for
1D interpolation, where i is the index corresponding to vminStar_i and j is
the index for each logetaStar. Each file corresponds to a different index i.
Here only one file is written for a specific vminStar.
Input:
index: int
Index of vminStar.
output_file_tail: string
Tag to be added to the file name.
logeta_index_range: tuple
A tuple (index0, index1) between which logetaStar will be considered.
If this is None, then the whole list of logetaStar is used.
extra_tail: string
Additional tail to be added to filenames.
"""
print('index =', index)
print('output_file_tail =', output_file_tail)
vminStar = self.vmin_logeta_sampling_table[index, 0, 0]
logetaStar_list = self.vmin_logeta_sampling_table[index, :, 1]
plot = False
if logeta_index_range is not None:
logetaStar_list = \
logetaStar_list[logeta_index_range[0]: logeta_index_range[1]]
plot = True
print("vminStar =", vminStar)
table = np.empty((0, 2))
for logetaStar in logetaStar_list:
try:
constr_opt = self.ConstrainedOptimalLikelihood(vminStar, logetaStar,
plot=plot)
except:
print("error")
os.system("say Error")
pass
else:
print("index =", index, "; vminStar =", vminStar,
"; logetaStar =", logetaStar, "; constr_opt =", constr_opt)
table = np.append(table, [[logetaStar, constr_opt]], axis=0)
# table = np.append(table, [logetaStar])
print("vminStar =", vminStar, "; table =", table)
if True:
temp_file = output_file_tail + "_" + str(index) + \
"_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
print(temp_file)
np.savetxt(temp_file, table)
return
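# Sketch of reading one of these tables back (index 0 and the 'cubic' kind are
# assumptions; ConfidenceBand below performs the same read and interpolation):
#
#   tab = np.loadtxt(output_file_tail + "_0_LogetaStarLogLikelihoodList.dat")
#   logL_of_logeta = interpolate.interp1d(tab[:, 0], tab[:, 1], kind='cubic')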
def LogLikelihoodList(self, output_file_tail, extra_tail="", processes=None,
vmin_index_list=None, logeta_index_range=None):
""" Loops thorugh the list of all vminStar and calls GetLikelihoodTable,
which will print the likelihood tables to files.
Input:
output_file_tail: string
Tag to be added to the file name.
extra_tail: string, optional
Additional tail to be added to filenames.
processes: int, optional
Number of processes for parallel programming.
vmin_index_list: ndarray, optional
List of indices in vminStar_list for which we calculate the optimal
likelihood. If not given, the whole list of vminStars is used.
logeta_index_range: tuple, optional
A tuple (index0, index1) between which logetaStar will be considered.
If not given, then the whole list of logetaStar is used.
"""
if vmin_index_list is None:
vmin_index_list = range(0, self.vmin_logeta_sampling_table.shape[0])
else:
try:
len(vmin_index_list)
except TypeError:
vmin_index_list = range(vmin_index_list,
self.vmin_logeta_sampling_table.shape[0])
print("vmin_index_list =", vmin_index_list)
print("logeta_index_range =", logeta_index_range)
kwargs = ({'index': index,
'output_file_tail': output_file_tail,
'logeta_index_range': logeta_index_range,
'extra_tail': extra_tail}
for index in vmin_index_list)
par.parmap(self.GetLikelihoodTable, kwargs, processes)
return
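# Usage sketch (the tag, sampling values and process count are placeholders;
# par.parmap is this project's parallel map helper):
#
#   run.VminSamplingList("_example_tail", 300, 1000, 30)
#   run.VminLogetaSamplingTable("_example_tail", 0.2, 0.2, 20)
#   run.LogLikelihoodList("_example_tail", processes=4)
#
# Note: the _logL_interp defined just below appears to be a leftover; ConfidenceBand
# uses its own nested helper of the same name that closes over a local interpolant.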
def _logL_interp(vars_list, constraints):
constr_not_valid = constraints(vars_list)[:-1] < 0
if np.any(constr_not_valid):
constr_list = constraints(vars_list)[constr_not_valid]
return -constr_list.sum() * 10**2
return logL_interp(vars_list)
def ConfidenceBand(self, output_file_tail, delta_logL, interpolation_order,
extra_tail="", multiplot=True):
""" Compute the confidence band.
Input:
output_file_tail: string
Tag to be added to the file name.
delta_logL: float
Target difference between the constrained minimum and the
unconstrained global minimum of MinusLogLikelihood.
interpolation_order: int
interpolation order for the interpolated constrained minimum of
MinusLogLikelihood as a function of logeta, for a fixed vmin.
extra_tail: string, optional
Additional tail to be added to filenames.
multiplot: bool, optional
Whether to plot log(L) as a function of logeta for each vmin, and the
horizontal line corresponding to a given delta_logL.
"""
print("self.vmin_sampling_list =", self.vmin_sampling_list)
self.vmin_logeta_band_low = []
self.vmin_logeta_band_up = []
vmin_last_step = self.optimal_vmin[-1]
if multiplot:
plt.close()
for index in range(self.vmin_sampling_list.size):
print("index =", index)
print("vmin =", self.vmin_sampling_list[index])
logeta_optim = self.OptimumStepFunction(min(self.vmin_sampling_list[index],
vmin_last_step))
file = output_file_tail + "_" + str(index) + \
"_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
try:
with open(file, 'r') as f_handle:
table = np.loadtxt(f_handle)
except:
continue
x = table[:, 0] # this is logeta
y = table[:, 1] # this is logL
logL_interp = interpolate.interp1d(x, y, kind='cubic')
def _logL_interp(vars_list, constraints):
constr_not_valid = constraints(vars_list)[:-1] < 0
if np.any(constr_not_valid):
constr_list = constraints(vars_list)[constr_not_valid]
return -constr_list.sum() * 1e2
return logL_interp(vars_list)
print(self.optimal_logL - delta_logL)
print(np.array([table[0, 0]]), " ", table[-1, 0])
print(logeta_optim)
def constr_func(logeta, logeta_min=np.array([table[0, 0]]),
logeta_max=np.array([table[-1, 0]])):
return np.concatenate([logeta - logeta_min, logeta_max - logeta])
constr = ({'type': 'ineq', 'fun': constr_func})
try:
logeta_minimLogL = minimize(_logL_interp, np.array([logeta_optim]),
args=(constr_func,), constraints=constr).x[0]
except ValueError:
print("ValueError at logeta_minimLogL")
logeta_minimLogL = logeta_optim
pass
print("logeta_minimLogL =", logeta_minimLogL)
print("x =", x)
print("y =", y)
if multiplot:
plt.close()
plt.plot(x, y, 'o-')
plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
plt.title("index =" + str(index) + ", v_min =" +
str(self.vmin_sampling_list[index]) + "km/s")
plt.xlim(x[0], x[-1])
plt.ylim(-5, 20)
plt.show()
error = False
try:
if y[0] > self.optimal_logL + delta_logL and \
logeta_minimLogL < self.optimal_logL + delta_logL:
sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
delta_logL,
table[0, 0], logeta_minimLogL)
self.vmin_logeta_band_low += \
[[self.vmin_sampling_list[index], sol]]
except ValueError:
print("ValueError: Error in calculating vmin_logeta_band_low")
error = True
try:
if y[-1] > self.optimal_logL + delta_logL and \
logeta_minimLogL < self.optimal_logL + delta_logL:
sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
delta_logL,
logeta_minimLogL, table[-1, 0])
self.vmin_logeta_band_up += \
[[self.vmin_sampling_list[index], sol]]
except ValueError:
print("ValueError: Error in calculating vmin_logeta_band_hi")
error = T
if error:
plt.close()
plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
plt.title("index =" + str(index) + "; v_min =" +
str(self.vmin_sampling_list[index]) + "km/s")
plt.xlim(x[0], x[-1])
plt.ylim([-5, 20])
plt.plot(x, y, 'o-', color="r")
plt.plot(logeta_optim, logL_interp(logeta_optim), '*')
plt.plot(logeta_optim, self.optimal_logL, '*')
print("ValueError")
plt.show()
# raise
pass
if multiplot:
plt.show()
self.vmin_logeta_band_low = np.array(self.vmin_logeta_band_low)
self.vmin_logeta_band_up = np.array(self.vmin_logeta_band_up)
print("lower band: ", self.vmin_logeta_band_low)
print("upper band: ", self.vmin_logeta_band_up)
self.PlotConfidenceBand()
delta_logL = round(delta_logL, 1)
file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + ".dat"
print(file)
np.savetxt(file, self.vmin_logeta_band_low)
file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + ".dat"
print(file)
np.savetxt(file, self.vmin_logeta_band_up)
return
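# Usage sketch (tag and values are placeholders; delta_logL is the offset above
# the unconstrained minimum of -log(L) that defines the band edges):
#
#   run.ConfidenceBand("_example_tail", delta_logL=2.7, interpolation_order=1)
#   # writes _example_tail_FoxBand_low_deltalogL_2.7.dat and
#   #        _example_tail_FoxBand_up_deltalogL_2.7.dat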
def PlotConfidenceBand(self):
""" Plot the confidence band and the best-fit function.
"""
plt.close()
try:
plt.plot(self.vmin_logeta_band_low[:, 0], self.vmin_logeta_band_low[:, 1], 'o-')
except IndexError:
pass
try:
plt.plot(self.vmin_logeta_band_up[:, 0], self.vmin_logeta_band_up[:, 1], 'o-')
except IndexError:
pass
self.PlotOptimum(ylim_percentage=(1.2, 0.8), plot_close=False, plot_show=True)
def ImportConfidenceBand(self, output_file_tail, delta_logL, extra_tail=""):
""" Import the confidence band from file.
Input:
output_file_tail: string
Tag to be added to the file name.
delta_logL: float
Target difference between the constrained minimum and the
unconstrained global minimum of MinusLogLikelihood.
extra_tail: string, optional
Additional tail to be added to filenames.
"""
delta_logL = round(delta_logL, 1)
file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
print(file)
with open(file, 'r') as f_handle:
self.vmin_logeta_band_low = np.loadtxt(f_handle)
file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
with open(file, 'r') as f_handle:
self.vmin_logeta_band_up = np.loadtxt(f_handle)
return
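# Read-back sketch (placeholders as above): a band written by ConfidenceBand can
# be reloaded and plotted without recomputing it.
#
#   run.ImportConfidenceBand("_example_tail", delta_logL=2.7)
#   run.PlotConfidenceBand()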
| gpl-2.0 |
caiostringari/swantools | test.py | 1 | 2740 |
import swantools.io
import swantools.utils
import swantools.plot
import datetime
import matplotlib.pyplot as plt
import numpy as np
def readtable():
R = swantools.io.SwanIO()
P = swantools.plot.SwanPlot()
# Reading TABLE data with headers:
df = R.read_swantable('data/table.txt')
y = df["Hsig"]
x = df.index.values
P.timeseries(x,y,"Significant Wave Heights")
def readspc():
# Reading spectral data
R = swantools.io.SwanIO()
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
P = swantools.plot.SwanPlot()
P.spcplot(freqs,dirs,times[15],spectrum[15,:,:]*factors[15])
# for t, time in enumerate(times):
# P.spcplot(freqs,dirs,times[t],spectrum[t,:,:])
def readblock(mode):
R = swantools.io.SwanIO()
P = swantools.plot.SwanPlot()
if mode == "non-stat":
# Reading a block file - Non stationary example
lon,lat,times,hs = R.read_swanblock('data/block.mat','Hsig')
P.blockplot(lon,lat,hs[0,:,:],"Non-stationary Results")
# for t, time in enumerate(times):
# P.blockplot(lon,lat,hs[t,:,:],time.strftime("%Y%m%d %H:%M"))
elif mode == "stat":
# Reading a block file - Non stationary example
lon,lat,times,hs = R.read_swanblock('data/stat_block.mat','Hsig',stat=True)
P.blockplot(lon,lat,hs,"Stationary Results")
def writescp():
# Getting some data to play with
R = swantools.io.SwanIO()
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
# Re-writing the data
R.write_spectrum("spcout.spc",lat,lon,times,freqs,dirs,factors,spectrum)
# Plot to confirm
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('spcout.spc')
P = swantools.plot.SwanPlot()
for t, time in enumerate(times):
P.spcplot(freqs,dirs,times[t],spectrum[t,:,:])
def netcdf_output():
R = swantools.io.SwanIO()
W = swantools.io.Converters()
lon,lat,times,hs = R.read_swanblock('data/block.mat','Hsig')
W.np2nc("Hsig.nc",lat,lon,times,hs,"Significant Wave Height")
def spectral_output():
R = swantools.io.SwanIO()
W = swantools.io.Converters()
lon,lat,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
W.spc2nc("spectrum.nc",lat,lon,freqs,dirs,times,factors,spectrum)
if __name__ == "__main__":
# # Table data
# import seaborn as sns
# with sns.axes_style("darkgrid"):
# readtable()
# Spectral data
readspc()
# Field data
readblock("non-stat")
# Converting block to netCDF4
netcdf_output()
# Converting spectral file to netCDF4
spectral_output()
# Writing spectral data
writescp()
| gpl-2.0 |
tpsatish95/OCR-on-Indus-Seals | code/Test/TextROI.py | 1 | 16306 | # -*- coding: utf-8 -*-
import skimage.io
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import selectivesearch
import numpy as np
import skimage.transform
import os
import shutil
import caffe
from PIL import Image
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
text_cut_final = set()
def getClass(FileList):
caffe.set_mode_gpu()
classifier = caffe.Classifier("../ROIs_Indus/deploy.prototxt","../ROIs_Indus/Models/bvlc_googlenet_indusnet_iter_20000.caffemodel" ,
image_dims=[224,224], raw_scale=255.0, channel_swap = [2,1,0])
inputs = [caffe.io.load_image(im_f) for im_f in FileList]
print("Classifying %d inputs." % len(inputs))
predictions = classifier.predict(inputs)
return predictions
def texbox_ext():
global text
global both_text
global text_cut_final
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in both_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1']
aw = A['x1'] + A['w'] - B['x1']
overf = 1
# if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
if A['x1'] < B['x1']: # B is right to A
aw = B['x1']+B['w'] - A['x1']
overf = 1
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# REPLACE by Cohen Suderland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
text_cut_final = text_cut_final - both_text # CHANGE THIS LINE
def texbox_cut():
global no_text
no_text = no_text.union(both_text)
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in no_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1'] + B['w']
overf = 1
if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
ah = A['h'] - (A['y1']+A['h'] - B['y1'])
overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
# if A['x1'] < B['x1']: # B is right to A
# aw = A['w'] - (A['x1']+A['w'] - B['x1'])
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# REPLACE by Cohen Suderland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
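# The overlap measure used in texbox_ext and texbox_cut above is the standard
# intersection-over-union; a minimal standalone sketch of the same computation
# for boxes given as (x, y, w, h):
#
#   def iou(a, b):
#       ix = max(0, min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0]))
#       iy = max(0, min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1]))
#       inter = ix * iy
#       union = a[2] * a[3] + b[2] * b[3] - inter
#       return float(inter) / union if union > 0 else 0.0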
def extend_text_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
def draw_textbox():
global width, height
thresh = ((width+height)/2)*(0.25)
tempc = set()
for x, y, w, h in text_boxes:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
f = 0
for x1, y1, w1, h1 in text_boxes:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
f = 1
if f == 0:
text.add((x, y, w, h))
text.add(extend_text_rect(temp))
def contains(p):  # p: candidate box (x, y, w, h)
x1, y1, w1, h1 = p
for x, y, w, h in candidates:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
return True
if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
candidates.remove((x, y, w, h))
return False
return False
def extend_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
def extend_superbox():
global width, height
thresh = ((width+height)/2)*(0.06)
tempc = set()
for x, y, w, h in final:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in final:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
final_extended.add(extend_rect(temp))
def draw_superbox(finals=[]):
noover = []
refinedT = []
global final
final = set()
# (x1,y1) top-left coord, (x2,y2) bottom-right coord, (w,h) size
if finals != []:
refinedT = finals
else:
refinedT = refined
remp = set(refinedT)
ref = list(refinedT)
while len(ref) > 0:
x1, y1, w1, h1 = ref[0]
if len(ref) == 1: # final box
final.add((x1, y1, w1, h1))
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
else:
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
over = set()
for x2, y2, w2, h2 in remp:
A = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
B = {'x1': x2, 'y1': y2, 'x2': x2+w2, 'y2': y2+h2, 'w': w2, 'h': h2}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overlap_A = float(SI) / float(SA)
overlap_B = float(SI) / float(SB)
# print(overlap_AB)
#
if overlap_A >= 0.40 or overlap_B >= 0.40:
over.add((B['x1'],B['y1'],B['w'],B['h']))
# print(len(over))
if len(over) != 0: #Overlap
remp = remp - over
for i in over: ref.remove(i)
over.add((A['x1'],A['y1'],A['w'],A['h']))
# print(over)
final.add((min([i[0] for i in over]), min([i[1] for i in over]), max([i[0]+i[2] for i in over]) - min([i[0] for i in over]), max([i[1]+i[3] for i in over]) - min([i[1] for i in over])))
# final.add((np.mean([i[0] for i in over]), np.mean([i[1] for i in over]), np.mean([i[2] for i in over]), np.mean([i[3] for i in over])))
noover.append(False)
else: #No overlap
final.add((x1,y1,w1,h1))
noover.append(True)
if all(noover):
return
else:
draw_superbox(final)
return
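# draw_superbox merges any boxes whose intersection covers at least 40% of either
# box into their common bounding box, then recurses on the merged set until a
# pass produces no overlaps. A tiny illustration with hypothetical boxes:
#
#   refined = {(0, 0, 10, 10), (2, 2, 10, 10), (40, 40, 5, 5)}
#   # the first two overlap by more than 40%, so they merge into (0, 0, 12, 12);
#   # (40, 40, 5, 5) overlaps nothing and is kept unchanged.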
def contains_remove():
for x, y, w, h in merged_candidates:
f = False
temp = set(merged_candidates)
temp.remove((x, y, w, h))
for x1, y1, w1, h1 in temp:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
f = False
break
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
else:
f = True
if f == True:
refined.add((x, y, w, h))
# def contains_remove():
# for x, y, w, h in merged_candidates:
# temp = set(merged_candidates)
# temp.remove((x, y, w, h))
# test = []
# for x1, y1, w1, h1 in temp:
# A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
# B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# # overlap between A and B
# SA = A['w']*A['h']
# SB = B['w']*B['h']
# SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
# SU = SA + SB - SI
# overlap_AB = float(SI) / float(SU)
# if overlap_AB > 0.0:
# # if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
# test.append(False)
# else:
# test.append(True)
# else:
# test.append(True)
# if all(test):
# refined.add((x, y, w, h))
def mean_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[1]+i[3] for i in l]) - min([i[1] for i in l]))
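# Note: despite its name, mean_rect returns the bounding box of the whole group
# (minimum corner and maximum extents), not an averaged rectangle, so merge()
# replaces each cluster of nearby candidates with their union.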
def merge():
global width, height
thresh = int(((width+height)/2)*(0.14))
tempc = set()
for x, y, w, h in candidates:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in candidates:
if abs(x1-x) <= thresh and abs(y1-y) <= thresh and abs(w1-w) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
merged_candidates.add(mean_rect(temp))
contains_remove()
for name in os.listdir("./Images"):
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
print("Processing Image " + name.split(".")[0])
fname = "./Images/" + name
print(fname)
img = skimage.io.imread(fname)
width = len(img[0])
height = len(img)
# new_size = 256
# height = int(new_size * height / width)
# width = new_size
if width*height < 256*256*(0.95) and abs(width-height) <= 3:
new_size = 512
height = int(new_size * height / width)
width = new_size
print("A")
elif width*height < 220*220*(1.11):
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B")
elif width*height < 256*256:
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B1")
elif width*height > 512*512*(0.99) and width < 800 and height < 800:
new_size = 512
height = int(new_size * height / width)
width = new_size
print("C")
elif width*height < 512*512*(0.95) and width*height > 256*256*(1.15):
new_size = 512
height = int(new_size * height / width)
width = new_size
print("D")
tried = []
while True:
tried.append(width)
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
stage = 1
text_cut_final = set()
for sc in [350,450,500]:
for sig in [0.8]:
for mins in [30,60,120]: # important
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]):
pass
else:
img = skimage.transform.resize(img, (height, width))
img_lbl, regions = selectivesearch.selective_search(
img, scale=sc, sigma= sig,min_size = mins)
for r in regions:
# excluding same rectangle (with different segments)
if r['rect'] in candidates:
continue
# excluding regions smaller than 2000 pixels
if r['size'] < 2000:
continue
# distorted rects
x, y, w, h = r['rect']
if w / h > 1.2 or h / w > 1.2:
continue
if w >= (img.shape[0]-1)*(0.7) and h >= (img.shape[1]-1)*(0.7):
continue
candidates.add(r['rect'])
print("Stage " + str(stage) + " Complete.")
stage+=1
print(candidates)
merge()
print(refined)
draw_superbox()
print(final)
extend_superbox()
print(final_extended)
os.makedirs("Regions/"+name.split(".")[0])
# draw rectangles on the original image
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in final_extended:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Regions/"+name.split(".")[0]+"/FinalRegions.png")
plt.close('all')
img1 = skimage.io.imread(fname)[:,:,:3]
if height == len(img1) and width == len(img1[0]): pass
else: img1 = skimage.transform.resize(img1, (height, width))
# imgT = Image.open(fname).convert('L')
# w, h = imgT.size
# if height == h and width == w:
# pass
# else:
# # img1 = skimage.transform.resize(img1, (height, width))
# imgT = imgT.resize((width,height), Image.ANTIALIAS)
ij = 1
fList = []
box_list = []
for x, y, w, h in final_extended:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg", img1[y:y+h,x:x+w])
# imgT.crop((x,y,x+w,y+h)).save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
# imgT = Image.open("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.png").convert('L')
# imgT.save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
fList.append("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg")
box_list.append((x, y, w, h))
ij+=1
# classify text no text
text_boxes=set()
text = set()
no_text = set()
both_text = set()
text_cut_final = set()
i = 0
try:
a = getClass(fList)
l = np.array([0,1,2])
for pred in a:
idx = list((-pred).argsort())
pred = l[np.array(idx)]
if pred[0] == 1 or pred[0] == 2:
text_boxes.add(box_list[i])
elif pred[0] == 0:
no_text.add(box_list[i])
if pred[0] == 2:
both_text.add(box_list[i])
print(pred)
i+=1
except:
print("No Text Regions")
draw_textbox()
print(text)
texbox_cut()
print(text_cut)
texbox_ext()
print(text_cut_final)
# draw rectangles on the original image
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]): pass
else: img = skimage.transform.resize(img, (height, width))
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in text_cut_final:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Result/final_"+name.split(".")[0]+".png")
plt.close('all')
ij = 1
for x, y, w, h in text_cut_final:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_text.png", img[y:y+h,x:x+w])
ij+=1
# min area check
minf = 0
for x, y, w, h in text_cut_final:
if w*h < width*height*0.20 and (w < width*0.20 or h < height*0.20):
minf = 1
if (len(text_cut_final) == 0 or minf == 1) and len(tried) < 3:
print(tried)
print("New size being tried.")
shutil.rmtree("Regions/"+name.split(".")[0]+"/")
img = skimage.io.imread(fname)
twidth = len(img[0])
theight = len(img)
new_size = list(set([256,512,twidth]) - set(tried))[0]
height = int(new_size * theight / twidth)
width = new_size
else:
break
| apache-2.0 |