repo_name | path | copies | size | content | license
---|---|---|---|---|---
cosmoharrigan/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-1] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
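# A minimal usage sketch for tangent_plot above (illustrative; assumes pylearn2
# and its theano dependency are installed, and a matplotlib version that still
# provides pyplot.hold, which the function calls). For y = sin(x) the exact
# tangent slopes are cos(x):
import numpy as np
from pylearn2.gui.tangent_plot import tangent_plot
x = list(np.linspace(-3., 3., 13))
y = [float(np.sin(v)) for v in x]
s = [float(np.cos(v)) for v in x]
tangent_plot(x, y, s)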
rhoscanner-team/pcd-plotter | delaunay_example.py | 1 | 1435 | import numpy as np
from scipy.spatial import Delaunay
points = np.random.rand(30, 2) # 30 points in 2-d
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o', hold=1)
plt.xlim(-1, 2)
plt.ylim(-1, 2)
# -- the same stuff for the convex hull
edges = set()
edge_points = []
for ia, ib in tri.convex_hull:
add_edge(ia, ib)
lines = LineCollection(edge_points)
plt.figure()
plt.title('Convex hull')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o', hold=1)
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
| gpl-2.0 |
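# A shorter way to draw the same triangulation, sketched as an alternative to
# the manual edge list above (assumes scipy's Delaunay exposes tri.simplices
# and matplotlib >= 3.0, where the hold keyword used above no longer exists):
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
points = np.random.rand(30, 2)
tri = Delaunay(points)
plt.triplot(points[:, 0], points[:, 1], tri.simplices, color='0.6')
plt.plot(points[:, 0], points[:, 1], 'o')
plt.show()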
remenska/rootpy | rootpy/plotting/contrib/plot_corrcoef_matrix.py | 5 | 12192 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from ...extern.six.moves import range
from ...extern.six import string_types
__all__ = [
'plot_corrcoef_matrix',
'corrcoef',
'cov',
]
def plot_corrcoef_matrix(matrix, names=None,
cmap=None, cmap_text=None,
fontsize=12, grid=False,
axes=None):
"""
This function will draw a lower-triangular correlation matrix
Parameters
----------
matrix : 2-dimensional numpy array/matrix
A correlation coefficient matrix
names : list of strings, optional (default=None)
List of the parameter names corresponding to the rows in ``matrix``.
cmap : matplotlib color map, optional (default=None)
Color map used to color the matrix cells.
cmap_text : matplotlib color map, optional (default=None)
Color map used to color the cell value text. If None, then
all values will be black.
fontsize : int, optional (default=12)
Font size of parameter name and correlation value text.
grid : bool, optional (default=False)
If True, then draw dashed grid lines around the matrix elements.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
Notes
-----
NumPy and matplotlib are required
Examples
--------
>>> matrix = corrcoef(data.T, weights=weights)
>>> plot_corrcoef_matrix(matrix, names)
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
if axes is None:
axes = plt.gca()
matrix = np.asarray(matrix)
if matrix.ndim != 2:
raise ValueError("matrix is not a 2-dimensional array or matrix")
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix is not square")
if names is not None and len(names) != matrix.shape[0]:
raise ValueError("the number of names does not match the number of "
"rows/columns in the matrix")
# mask out the upper triangular matrix
matrix[np.triu_indices(matrix.shape[0])] = np.nan
if isinstance(cmap_text, string_types):
cmap_text = cm.get_cmap(cmap_text, 201)
if cmap is None:
cmap = cm.get_cmap('jet', 201)
elif isinstance(cmap, string_types):
cmap = cm.get_cmap(cmap, 201)
# make NaN pixels white
cmap.set_bad('w')
axes.imshow(matrix, interpolation='nearest',
cmap=cmap, origin='upper',
vmin=-1, vmax=1)
axes.set_frame_on(False)
plt.setp(axes.get_yticklabels(), visible=False)
plt.setp(axes.get_yticklines(), visible=False)
plt.setp(axes.get_xticklabels(), visible=False)
plt.setp(axes.get_xticklines(), visible=False)
if grid:
# draw grid lines
for slot in range(1, matrix.shape[0] - 1):
# vertical
axes.plot((slot - 0.5, slot - 0.5),
(slot - 0.5, matrix.shape[0] - 0.5), 'k:', linewidth=1)
# horizontal
axes.plot((-0.5, slot + 0.5),
(slot + 0.5, slot + 0.5), 'k:', linewidth=1)
if names is not None:
for slot in range(1, matrix.shape[0]):
# diagonal
axes.plot((slot - 0.5, slot + 1.5),
(slot - 0.5, slot - 2.5), 'k:', linewidth=1)
# label cell values
for row, col in zip(*np.tril_indices(matrix.shape[0], k=-1)):
value = matrix[row][col]
if cmap_text is not None:
color = cmap_text((value + 1.) / 2.)
else:
color = 'black'
axes.text(
col, row,
"{0:d}%".format(int(value * 100)),
color=color,
ha='center', va='center',
fontsize=fontsize)
if names is not None:
# write parameter names
for i, name in enumerate(names):
axes.annotate(
name, (i, i),
rotation=45,
ha='left', va='bottom',
transform=axes.transData,
fontsize=fontsize)
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None, repeat_weights=0):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
import numpy as np
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
X = np.array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None), np.newaxis)
else:
axis = 1
tup = (np.newaxis, slice(None))
if y is not None:
y = np.array(y, copy=False, ndmin=2, dtype=float)
X = np.concatenate((X, y), axis)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if weights is not None:
weights = np.array(weights, dtype=float)
weights_sum = weights.sum()
if weights_sum <= 0:
raise ValueError(
"sum of weights is non-positive")
X -= np.average(X, axis=1-axis, weights=weights)[tup]
if repeat_weights:
# each weight represents a number of repetitions of an observation
# the total sample size can be determined in this case and we have
# both an unbiased and biased weighted covariance
fact = weights_sum - ddof
else:
# normalize weights so they sum to unity
weights /= weights_sum
# unbiased weighted covariance is not defined if the weights are
# not integral frequencies (repeat-type)
fact = (1. - np.power(weights, 2).sum())
else:
weights = 1
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
fact = float(N - ddof)
if not rowvar:
return (np.dot(weights * X.T, X.conj()) / fact).squeeze()
else:
return (np.dot(weights * X, X.T.conj()) / fact).squeeze()
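# Numerical sanity check of the repeat_weights behaviour described above
# (illustrative): cov([1, 2, 3], weights=[1, 2, 1], repeat_weights=1) treats
# the weights as integer repeat counts, so it equals np.cov([1, 2, 2, 3]).
# The weighted mean is (1 + 2*2 + 3) / 4 = 2, sum of w*(x - mean)**2 is
# 1 + 0 + 1 = 2, and fact = sum(w) - ddof = 4 - 1 = 3, giving 2/3, the
# unbiased sample variance of the expanded data.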
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None, weights=None,
repeat_weights=0):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
import numpy as np
c = cov(x, y, rowvar, bias, ddof, weights, repeat_weights)
if c.size == 0:
# handle empty arrays
return c
try:
d = np.diag(c)
except ValueError: # scalar covariance
return 1
return c / np.sqrt(np.multiply.outer(d, d))
| gpl-3.0 |
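# A minimal usage sketch for the functions above on synthetic data (assumes the
# module imports cleanly as rootpy.plotting.contrib.plot_corrcoef_matrix, the
# path given in this row; the covariance values are made up for illustration):
import numpy as np
from matplotlib import pyplot as plt
from rootpy.plotting.contrib.plot_corrcoef_matrix import corrcoef, plot_corrcoef_matrix
names = ['a', 'b', 'c']
data = np.random.multivariate_normal(
[0., 0., 0.],
[[1., 0.5, 0.1], [0.5, 1., -0.3], [0.1, -0.3, 1.]],
size=1000)
weights = np.ones(len(data))
matrix = corrcoef(data.T, weights=weights)
plot_corrcoef_matrix(matrix, names=names)
plt.show()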
hetajen/vnpy161 | vn.trader/ctaStrategy/ctaBacktesting.py | 1 | 40890 | # encoding: UTF-8
'''
This file contains the backtesting engine of the CTA module. The backtesting
engine's API is identical to the live CTA engine, so the same strategy code
can be used for both backtesting and live trading.
History
<id> <author> <description>
2017051200 hetajen Example: strategy backtesting and optimization
'''
from __future__ import division
'''2017051200 Add by hetajen begin'''
import time
'''2017051200 Add by hetajen end'''
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import pymongo
from ctaBase import *
from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from vtFunction import loadMongoSetting
########################################################################
class BacktestingEngine(object):
"""
CTA回测引擎
函数接口和策略引擎保持一样,
从而实现同一套代码从回测到实盘。
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
# 本地停止单编号计数
self.stopOrderCount = 0
# stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
# 本地停止单字典
# key为stopOrderID,value为stopOrder对象
self.stopOrderDict = {} # 停止单撤销后不会从本字典中删除
self.workingStopOrderDict = {} # 停止单撤销后会从本字典中删除
# 引擎类型为回测
self.engineType = ENGINETYPE_BACKTESTING
# 回测相关
self.strategy = None # 回测策略
self.mode = self.BAR_MODE # 回测模式,默认为K线
self.startDate = ''
self.initDays = 0
self.endDate = ''
self.slippage = 0 # 回测时假设的滑点
self.rate = 0 # 回测时假设的佣金比例(适用于百分比佣金)
self.size = 1 # 合约大小,默认为1
self.priceTick = 0 # 价格最小变动
self.dbClient = None # 数据库客户端
self.dbCursor = None # 数据库指针
#self.historyData = [] # 历史数据的列表,回测用
self.initData = [] # 初始化用的数据
#self.backtestingData = [] # 回测用的数据
self.dbName = '' # 回测数据库名
self.symbol = '' # 回测集合名
self.dataStartDate = None # 回测数据开始日期,datetime对象
self.dataEndDate = None # 回测数据结束日期,datetime对象
self.strategyStartDate = None # 策略启动日期(即前面的数据用于初始化),datetime对象
self.limitOrderDict = OrderedDict() # 限价单字典
self.workingLimitOrderDict = OrderedDict() # 活动限价单字典,用于进行撮合用
self.limitOrderCount = 0 # 限价单编号
self.tradeCount = 0 # 成交编号
self.tradeDict = OrderedDict() # 成交字典
self.logList = [] # 日志记录
# 当前最新数据,用于模拟成交用
self.tick = None
self.bar = None
self.dt = None # 最新的时间
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.startDate = startDate
self.initDays = initDays
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
self.endDate = endDate
if endDate:
self.dataEndDate = datetime.strptime(endDate, '%Y%m%d')
# Push the timestamp to the end of the day; otherwise data on the end date
# itself would be excluded. Note that datetime.replace returns a new object,
# so the result must be assigned back.
self.dataEndDate = self.dataEndDate.replace(hour=23, minute=59)
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
"""设置回测模式"""
self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
"""设置历史数据所用的数据库"""
self.dbName = dbName
self.symbol = symbol
#----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
host, port, logging = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# 载入初始化需要用的数据
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt)
# 将数据从查询指针中读取出,并生成列表
self.initData = [] # 清空initData列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' %(initCursor.count() + self.dbCursor.count()))
#----------------------------------------------------------------------
def runBacktesting(self):
"""运行回测"""
# 载入历史数据
self.loadHistoryData()
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.inited = True
self.strategy.onInit()
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
"""新的K线"""
self.bar = bar
self.dt = bar.datetime
self.crossLimitOrder() # 先撮合限价单
self.crossStopOrder() # 再撮合停止单
self.strategy.onBar(bar) # 推送K线到策略中
#----------------------------------------------------------------------
def newTick(self, tick):
"""新的Tick"""
self.tick = tick
self.dt = tick.datetime
self.crossLimitOrder()
self.crossStopOrder()
self.strategy.onTick(tick)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
"""
初始化策略
setting是策略的参数设置,如果使用类中写好的默认设置则可以不传该参数
"""
self.strategy = strategyClass(self, setting)
self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
order = VtOrderData()
order.vtSymbol = vtSymbol
order.price = self.roundToPriceTick(price)
order.totalVolume = volume
order.status = STATUS_NOTTRADED # 刚提交尚未成交
order.orderID = orderID
order.vtOrderID = orderID
order.orderTime = str(self.dt)
# CTA委托类型映射
if orderType == CTAORDER_BUY:
order.direction = DIRECTION_LONG
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
order.direction = DIRECTION_LONG
order.offset = OFFSET_CLOSE
# 保存到限价单字典中
self.workingLimitOrderDict[orderID] = order
self.limitOrderDict[orderID] = order
return orderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
if vtOrderID in self.workingLimitOrderDict:
order = self.workingLimitOrderDict[vtOrderID]
order.status = STATUS_CANCELLED
order.cancelTime = str(self.dt)
del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = self.roundToPriceTick(price)
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# Iterate over all working limit orders
for orderID, order in self.workingLimitOrderDict.items():
# Check whether the order would be filled
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # in domestic tick data askPrice1 is 0 at limit-up, so buys cannot fill
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # in domestic tick data bidPrice1 is 0 at limit-down, so sells cannot fill
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# Taking a buy order as an example:
# 1. Suppose the current bar's OHLC is 100, 125, 90, 110
# 2. Suppose that at the close of the previous bar (i.e. the open of this bar)
#    the strategy submitted a buy limit order at 105
# 3. The realistic fill price is then 100 rather than 105, because 100 was the
#    best available market price at the moment the order was submitted
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# 从字典中删除该限价单
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# 遍历停止单字典中的所有停止单
for stopOrderID, so in self.workingStopOrderDict.items():
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
# 从字典中删除该限价单
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错"""
pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Bar"""
return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Tick"""
return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print str(datetime.now()) + "\t" + content
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
计算回测结果
"""
self.output(u'计算回测结果')
# 首先基于回测后的成交记录,计算每笔交易的盈亏
resultList = [] # 交易结果列表
longTrade = [] # 未平仓的多头交易
shortTrade = [] # 未平仓的空头交易
tradeTimeList = [] # 每笔成交时间戳
posList = [0] # 每笔成交后的持仓情况
for trade in self.tradeDict.values():
# 多头交易
if trade.direction == DIRECTION_LONG:
# 如果尚无空头交易
if not shortTrade:
longTrade.append(trade)
# 当前多头交易为平空
else:
while True:
entryTrade = shortTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
-closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([-1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
shortTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not shortTrade:
longTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 空头交易
else:
# 如果尚无多头交易
if not longTrade:
shortTrade.append(trade)
# 当前空头交易为平多
else:
while True:
entryTrade = longTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
longTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not longTrade:
shortTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 检查是否有交易
if not resultList:
self.output(u'无交易结果')
return {}
# 然后基于每笔交易的结果,我们可以计算具体的盈亏曲线和最大回撤等
capital = 0 # 资金
maxCapital = 0 # 资金最高净值
drawdown = 0 # 回撤
totalResult = 0 # 总成交数量
totalTurnover = 0 # 总成交金额(合约面值)
totalCommission = 0 # 总手续费
totalSlippage = 0 # 总滑点
timeList = [] # 时间序列
pnlList = [] # 每笔盈亏序列
capitalList = [] # 盈亏汇总的时间序列
drawdownList = [] # 回撤的时间序列
winningResult = 0 # 盈利次数
losingResult = 0 # 亏损次数
totalWinning = 0 # 总盈利金额
totalLosing = 0 # 总亏损金额
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
timeList.append(result.exitDt) # 交易的时间戳使用平仓时间
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
# 计算盈亏相关数据
winningRate = winningResult/totalResult*100 # 胜率
averageWinning = 0 # 这里把数据都初始化为0
averageLosing = 0
profitLossRatio = 0
if winningResult:
averageWinning = totalWinning/winningResult # 平均每笔盈利
if losingResult:
averageLosing = totalLosing/losingResult # 平均每笔亏损
if averageLosing:
profitLossRatio = -averageWinning/averageLosing # 盈亏比
# 返回回测结果
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
d['posList'] = posList
d['tradeTimeList'] = tradeTimeList
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
"""显示回测结果"""
d = self.calculateBacktestingResult()
# 输出
self.output('-' * 30)
self.output(u'第一笔交易:\t%s' % d['timeList'][0])
self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
# 绘图
import matplotlib.pyplot as plt
import numpy as np
try:
import seaborn as sns # 如果安装了seaborn则设置为白色风格
sns.set_style('whitegrid')
except ImportError:
pass
pCapital = plt.subplot(4, 1, 1)
pCapital.set_ylabel("capital")
pCapital.plot(d['capitalList'], color='r', lw=0.8)
pDD = plt.subplot(4, 1, 2)
pDD.set_ylabel("DD")
pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
pPnl = plt.subplot(4, 1, 3)
pPnl.set_ylabel("pnl")
pPnl.hist(d['pnlList'], bins=50, color='c')
pPos = plt.subplot(4, 1, 4)
pPos.set_ylabel("Position")
if d['posList'][-1] == 0:
del d['posList'][-1]
tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
xindex = np.arange(0, len(tradeTimeIndex), np.int(len(tradeTimeIndex)/10))
tradeTimeIndex = map(lambda i: tradeTimeIndex[i], xindex)
pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
pPos.set_ylim(-1.2, 1.2)
plt.sca(pPos)
plt.tight_layout()
plt.xticks(xindex, tradeTimeIndex, rotation=30) # 旋转15
plt.show()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""发送策略更新事件,回测中忽略"""
pass
#----------------------------------------------------------------------
def setSlippage(self, slippage):
"""设置滑点点数"""
self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
"""设置合约大小"""
self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
"""设置佣金比例"""
self.rate = rate
#----------------------------------------------------------------------
def setPriceTick(self, priceTick):
"""设置价格最小变动"""
self.priceTick = priceTick
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
d = self.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
return result
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
"""并行优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 多进程优化,启动一个对应CPU核心数量的进程池
pool = multiprocessing.Pool(multiprocessing.cpu_count())
l = []
for setting in settingList:
l.append(pool.apply_async(optimize, (strategyClass, setting,
targetName, self.mode,
self.startDate, self.initDays, self.endDate,
self.slippage, self.rate, self.size,
self.dbName, self.symbol)))
pool.close()
pool.join()
# 显示结果
resultList = [res.get() for res in l]
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
"""取整价格到合约最小价格变动"""
if not self.priceTick:
return price
newPrice = round(price/self.priceTick, 0) * self.priceTick
return newPrice
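# Worked example of the rounding above (illustrative): with priceTick = 0.2 and
# price = 10.07, round(10.07 / 0.2) = round(50.35) = 50.0, so the rounded price
# is 50.0 * 0.2 = 10.0.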
########################################################################
class TradingResult(object):
"""每笔交易的结果"""
#----------------------------------------------------------------------
def __init__(self, entryPrice, entryDt, exitPrice,
exitDt, volume, rate, slippage, size):
"""Constructor"""
self.entryPrice = entryPrice # entry price
self.exitPrice = exitPrice # exit price
self.entryDt = entryDt # entry time (datetime)
self.exitDt = exitDt # exit time
self.volume = volume # traded volume (sign encodes direction)
self.turnover = (self.entryPrice+self.exitPrice)*size*abs(volume) # notional turnover
self.commission = self.turnover*rate # commission cost
self.slippage = slippage*2*size*abs(volume) # slippage cost
self.pnl = ((self.exitPrice - self.entryPrice) * volume * size
- self.commission - self.slippage) # net profit and loss
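# Worked example (using the demo settings further below: size=300,
# rate=0.3/10000, slippage=0.2): a long round trip entering at 100 and exiting
# at 110 with volume +1 gives turnover = (100 + 110) * 300 = 63000,
# commission = 63000 * 0.00003 = 1.89, slippage cost = 0.2 * 2 * 300 = 120,
# so pnl = (110 - 100) * 1 * 300 - 1.89 - 120 = 2878.11.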
########################################################################
class OptimizationSetting(object):
"""优化设置"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.paramDict = OrderedDict()
self.optimizeTarget = '' # 优化目标字段
#----------------------------------------------------------------------
def addParameter(self, name, start, end=None, step=None):
"""增加优化参数"""
if end is None and step is None:
self.paramDict[name] = [start]
return
if end < start:
print u'参数起始点必须不大于终止点'
return
if step <= 0:
print u'参数布进必须大于0'
return
l = []
param = start
while param <= end:
l.append(param)
param += step
self.paramDict[name] = l
#----------------------------------------------------------------------
def generateSetting(self):
"""生成优化参数组合"""
# 参数名的列表
nameList = self.paramDict.keys()
paramList = self.paramDict.values()
# 使用迭代工具生产参数对组合
productList = list(product(*paramList))
# 把参数对组合打包到一个个字典组成的列表中
settingList = []
for p in productList:
d = dict(zip(nameList, p))
settingList.append(d)
return settingList
#----------------------------------------------------------------------
def setOptimizeTarget(self, target):
"""设置优化目标字段"""
self.optimizeTarget = target
#----------------------------------------------------------------------
def formatNumber(n):
"""格式化数字到字符串"""
rn = round(n, 2) # 保留两位小数
return format(rn, ',') # 加上千分符
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
mode, startDate, initDays, endDate,
slippage, rate, size,
dbName, symbol):
"""多进程优化时跑在每个进程中运行的函数"""
engine = BacktestingEngine()
engine.setBacktestingMode(mode)
engine.setStartDate(startDate, initDays)
engine.setEndDate(endDate)
engine.setSlippage(slippage)
engine.setRate(rate)
engine.setSize(size)
engine.setDatabase(dbName, symbol)
engine.initStrategy(strategyClass, setting)
engine.runBacktesting()
d = engine.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
return (str(setting), targetValue)
'''2017051200 Modify by hetajen begin'''
from strategy.strategyAtrRsi import AtrRsiStrategy
def getEngine():
engine = BacktestingEngine()
engine.setBacktestingMode(engine.BAR_MODE) # backtest on bar (K-line) data
engine.setStartDate('20120101') # start date of the backtest data
engine.setSlippage(0.2) # one tick of the index future
engine.setRate(0.3 / 10000) # commission of 0.3 per 10,000 (0.003%)
engine.setSize(300) # contract multiplier of the index future
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
return engine
def getParam(type=0):
if type == 0:
setting = {'atrLength': 11}
else:
setting = OptimizationSetting() # create an optimization task setting object
setting.setOptimizeTarget('capital') # rank optimization results by net strategy profit
setting.addParameter('atrLength', 12, 20, 2) # first optimization parameter: atrLength from 12 to 20 in steps of 2
setting.addParameter('atrMa', 20, 30, 5) # second optimization parameter: atrMa from 20 to 30 in steps of 5
setting.addParameter('rsiLength', 5) # a parameter fixed at a single value
return setting
def xh_backTesting():
engine = getEngine()
setting = getParam()
engine.initStrategy(AtrRsiStrategy, setting)
engine.runBacktesting() # run the backtest
engine.showBacktestingResult() # show the backtest results
def xh_optimize():
engine = getEngine()
setting = getParam(1)
engine.runOptimization(AtrRsiStrategy, setting) # single-process optimization; elapsed time: xxx seconds
#engine.runParallelOptimization(AtrRsiStrategy, setting) # multi-process optimization; elapsed time: xx seconds
if __name__ == '__main__':
start = time.time()
xh_backTesting()
xh_optimize()
print u'耗时:%s' % (time.time() - start) # 性能测试
'''2017051200 Modify by hetajen end'''
| mit |
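# A quick sketch of how the optimization grid above expands (run inside this
# module, or after importing OptimizationSetting from ctaBacktesting; numbers
# match getParam(1) above):
setting = OptimizationSetting()
setting.addParameter('atrLength', 12, 20, 2) # -> [12, 14, 16, 18, 20]
setting.addParameter('atrMa', 20, 30, 5) # -> [20, 25, 30]
setting.addParameter('rsiLength', 5) # -> [5]
print len(setting.generateSetting()) # Cartesian product: 5 * 3 * 1 = 15 settings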
aureooms/networkx | examples/algorithms/blockmodel.py | 12 | 3014 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
# This partition selection is arbitrary, for illustrive purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H = next(nx.connected_component_subgraphs(G))
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
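# A self-contained sketch of the clustering step used in create_hc above, on a
# toy 4-node path graph (illustrative values only; same arbitrary threshold):
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
distances = numpy.array([[0., 1., 2., 3.],
[1., 0., 1., 2.],
[2., 1., 0., 1.],
[3., 2., 1., 0.]])
Y = distance.squareform(distances) # condensed distance vector
Z = hierarchy.complete(Y) # farthest-point (complete) linkage
membership = hierarchy.fcluster(Z, t=1.15)
print(membership)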
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/testing/compare.py | 11 | 12935 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
on a hash of the exact contents of the input file. The is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
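# Worked example of the RMS above (illustrative): for two 2x2 grayscale images
# whose pixels differ by 3 in one place and 4 in another, the histogram of
# absolute differences gives sum_of_squares = 3**2 + 4**2 = 25 over
# num_values = 4 pixels, so rms = sqrt(25 / 4) = 2.5.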
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
actual :str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
compare_images( img1, img2, 0.001 ):
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np.tostring(), width, height, output)
| mit |
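# A minimal usage sketch for the comparison helpers above (the image paths are
# placeholders, not files from this repository):
from matplotlib.testing.compare import compare_images, compare_float
print(compare_float(1.0, 1.0001, relTol=1e-3)) # None: within tolerance
print(compare_float(1.0, 1.1, absTol=1e-2)) # a diff message otherwise
# compare_images returns None on a match, or a report string on failure:
print(compare_images('baseline/plot.png', 'output/plot.png', tol=0.001))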
PennyDreadfulMTG/Penny-Dreadful-Tools | decksite/charts/chart.py | 1 | 2940 | import os.path
import pathlib
from typing import Dict
import matplotlib as mpl
# This has to happen before pyplot is imported to avoid needing an X server to draw the graphs.
# pylint: disable=wrong-import-position
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from decksite.data import deck
from shared import configuration, logger
from shared.pd_exception import DoesNotExistException, OperationalException
def cmc(deck_id: int, attempts: int = 0) -> str:
if attempts > 3:
msg = 'Unable to generate cmc chart for {id} in 3 attempts.'.format(id=deck_id)
logger.error(msg)
raise OperationalException(msg)
path = determine_path(str(deck_id) + '-cmc.png')
if acceptable_file(path):
return path
d = deck.load_deck(deck_id)
costs: Dict[str, int] = {}
for ci in d.maindeck:
c = ci.card
if c.is_land():
continue
if c.mana_cost is None:
cost = '0'
elif next((s for s in c.mana_cost if '{X}' in s), None) is not None:
cost = 'X'
else:
converted = int(float(c.cmc))
cost = '7+' if converted >= 7 else str(converted)
costs[cost] = ci.get('n') + costs.get(cost, 0)
path = image(path, costs)
if acceptable_file(path):
return path
return cmc(deck_id, attempts + 1)
def image(path: str, costs: Dict[str, int]) -> str:
ys = ['0', '1', '2', '3', '4', '5', '6', '7+', 'X']
xs = [costs.get(k, 0) for k in ys]
sns.set_style('white')
sns.set(font='Concourse C3', font_scale=3)
g = sns.barplot(x=ys, y=xs, palette=['#cccccc'] * len(ys)) # pylint: disable=no-member
g.axes.yaxis.set_ticklabels([])
rects = g.patches
sns.set(font='Concourse C3', font_scale=2)
for rect, label in zip(rects, xs):
if label == 0:
continue
height = rect.get_height()
g.text(rect.get_x() + rect.get_width() / 2, height + 0.5, label, ha='center', va='bottom')
g.margins(y=0, x=0)
sns.despine(left=True, bottom=True)
g.get_figure().savefig(path, transparent=True, pad_inches=0, bbox_inches='tight')
plt.clf() # Clear all data from matplotlib so it does not persist across requests.
return path
def determine_path(name: str) -> str:
charts_dir = configuration.get_str('charts_dir')
pathlib.Path(charts_dir).mkdir(parents=True, exist_ok=True)
if not os.path.exists(charts_dir):
raise DoesNotExistException('Cannot store graph images because {charts_dir} does not exist.'.format(charts_dir=charts_dir))
return os.path.join(charts_dir, name)
def acceptable_file(path: str) -> bool:
if not os.path.exists(path):
return False
if os.path.getsize(path) >= 6860: # This is a few bytes smaller than a completely empty graph on prod.
return True
logger.warning('Chart at {path} is suspiciously small.'.format(path=path))
return False
| gpl-3.0 |
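# Worked example of the bucketing rule in cmc() above (the card objects are
# specific to this codebase, so only the rule itself is sketched): a card with
# no mana cost goes to bucket '0', a cost containing {X} goes to 'X', cmc 3.0
# goes to '3', and any converted cost of 7 or more (say 8.0) goes to '7+';
# lands are skipped, and each bucket sums the deck's per-card counts ci.get('n').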
ajdawson/windspharm | examples/iris/rws_example.py | 1 | 2190 | """Compute Rossby wave source from the long-term mean flow.
This example uses the iris interface.
Additional requirements for this example:
* iris (http://scitools.org.uk/iris/)
* matplotlib (http://matplotlib.org/)
* cartopy (http://scitools.org.uk/cartopy/)
"""
import warnings
import cartopy.crs as ccrs
import iris
import iris.plot as iplt
from iris.coord_categorisation import add_month
import matplotlib as mpl
import matplotlib.pyplot as plt
from windspharm.iris import VectorWind
from windspharm.examples import example_data_path
mpl.rcParams['mathtext.default'] = 'regular'
# Read zonal and meridional wind components from file using the iris module.
# The components are in separate files. We catch warnings here because the
# files are not completely CF compliant.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
uwnd = iris.load_cube(example_data_path('uwnd_mean.nc'))
vwnd = iris.load_cube(example_data_path('vwnd_mean.nc'))
uwnd.coord('longitude').circular = True
vwnd.coord('longitude').circular = True
# Create a VectorWind instance to handle the computations.
w = VectorWind(uwnd, vwnd)
# Compute components of rossby wave source: absolute vorticity, divergence,
# irrotational (divergent) wind components, gradients of absolute vorticity.
eta = w.absolutevorticity()
div = w.divergence()
uchi, vchi = w.irrotationalcomponent()
etax, etay = w.gradient(eta)
etax.units = 'm**-1 s**-1'
etay.units = 'm**-1 s**-1'
# Combine the components to form the Rossby wave source term.
S = eta * -1. * div - (uchi * etax + vchi * etay)
S.coord('longitude').attributes['circular'] = True
# Pick out the field for December at 200 hPa.
time_constraint = iris.Constraint(month='Dec')
add_month(S, 'time')
S_dec = S.extract(time_constraint)
# Plot Rossby wave source.
clevs = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30]
ax = plt.subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
fill = iplt.contourf(S_dec * 1e11, clevs, cmap=plt.cm.RdBu_r, extend='both')
ax.coastlines()
ax.gridlines()
plt.colorbar(fill, orientation='horizontal')
plt.title('Rossby Wave Source ($10^{-11}$s$^{-1}$)', fontsize=16)
plt.show()
| mit |
gkulkarni/JetMorphology | fitjet_3d.py | 1 | 5370 | """
File: fitjet_3d.py
Fits a geometric model to mock jet data. Uses image subtraction;
otherwise same as fitjet.py
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.optimize as op
import emcee
import triangle
import sys
# These mock data are produced by jet3d.py.
a2 = np.fromfile('mockdata_3d_nc100.dat',dtype=np.float32)
def I(theta):
a, b, i, l, alpha, beta, gamma = theta
u = np.linspace(0.0, 20.0*np.pi, 1000)
def z(u):
return (a/(2.0*np.pi)) * u * (u/(2.0*np.pi))**beta
zv = z(u)
def x(u):
return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.cos(u)
def y(u):
return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.sin(u)
xv = x(u)
yv = y(u)
def ri(i):
return np.matrix([[np.cos(i), 0.0, np.sin(i)],[0.0, 1.0, 0.0],[-np.sin(i), 0.0, np.cos(i)]])
def rl(l):
return np.matrix([[np.cos(l), -np.sin(l), 0.0],[np.sin(l), np.cos(l), 0.0],[0.0, 0.0, 1.0]])
zvarr = zv*gamma
iarr = zvarr/zvarr.max()
iarr *= np.pi/2.0
c = np.dstack((xv, yv, zv))
c = np.squeeze(c)
d = np.zeros((1000,3))
lm = rl(l)
for n in range(1000):
d[n] = c[n]*ri(iarr[n])*lm
xv = d[:,0]
yv = d[:,1]
xv = xv[~np.isnan(xv)]
yv = yv[~np.isnan(yv)]
nc = 100
a = np.zeros((nc,nc),dtype=np.float32)
zl = xv.min() - 5.0
zu = xv.max() + 5.0
yl = yv.min() - 5.0
yu = yv.max() + 5.0
lz = zu - zl
ly = yu - yl
dz = lz/nc
dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2).
def zloc(cood):
return int((cood-zl)/dz) + 1
def yloc(cood):
return int((cood-yl)/dy) + 1
for i in xrange(xv.size):
zpos = zloc(xv[i])
ypos = yloc(yv[i])
a[ypos, zpos] += 1.0
return a.flatten()
def neglnlike(theta, intensity, intensity_err):
model = I(theta)
inv_sigma2 = 1.0/intensity_err**2
return 0.5*(np.sum((intensity-model)**2*inv_sigma2 - np.log(inv_sigma2)))
a2_err = np.zeros_like(a2)
a2_err += 0.1
theta_guess = (0.1, 10.0, 2.0, 3.0, 0.2, 2.0, 0.5)
result = op.minimize(neglnlike, theta_guess, args=(a2, a2_err), method='Nelder-Mead')
print result.x
print result.success
def lnprior(theta):
a, b, i, l, alpha, beta, gamma = theta
if (0.05 < a < 0.15 and
8.0 < b < 12.0 and
1.0 < i < 3.0 and
2.0 < l < 4 and
0.1 < alpha < 0.3 and
1.0 < beta < 3.0 and
0.3 < gamma < 0.7):
return 0.0
return -np.inf
def lnprob(theta, intensity, intensity_err):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp - neglnlike(theta, intensity, intensity_err)
ndim, nwalkers = 7, 100
pos = [result.x + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(a2, a2_err))
sampler.run_mcmc(pos, 500)
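# Discard the first 100 steps of each walker as burn-in, then flatten the chain.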
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
plot_chain = True
if plot_chain:
mpl.rcParams['font.size'] = '10'
nplots = 7
plot_number = 0
fig = plt.figure(figsize=(12, 6), dpi=100)
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,0], c='k', alpha=0.1)
ax.axhline(result.x[0], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$A$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,1], c='k', alpha=0.1)
ax.axhline(result.x[1], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel('$B$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,2], c='k', alpha=0.1)
ax.axhline(result.x[2], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$i_0$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,3], c='k', alpha=0.1)
ax.axhline(result.x[3], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$\lambda_0$')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,4], c='k', alpha=0.1)
    ax.axhline(result.x[4], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$\alpha$')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,5], c='k', alpha=0.1)
    ax.axhline(result.x[5], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$\beta$')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,6], c='k', alpha=0.1)
    ax.axhline(result.x[6], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$\gamma$')
ax.set_xlabel('step')
plt.savefig('chains.pdf',bbox_inches='tight')
mpl.rcParams['font.size'] = '14'
fig = triangle.corner(samples, labels=['$A$', '$B$', '$i_0$', r'$\lambda_0$', r'$\alpha$', r'$\beta$', r'$\gamma$'],
truths=result.x)
fig.savefig("triangle.pdf")
| mit |
sdbonin/SOQresearch | SOQswapRK4.py | 1 | 8364 | # -*- coding: utf-8 -*-
"""
This code uses a loop along with our set of coupled differential equations and
matrix math to create arrays of 4-vector quaternions.
The old plotting functions need to be updated and incorporated into the end of
this code or a better visualization solution needs to be found.
"""
#------------------------------------------------------------------------------
# Importing modules and copying functions
# AKA "Setting stuff up"
#------------------------------------------------------------------------------
import numpy as np
from time import time as checktime
# a set of unit quaternions (i, j, k) and the identity matrix for building general q-matrices
rm = np.identity(2)
im = np.array([[-1j,0],[0,1j]])
jm = np.array([[0,1],[-1,0]])
km = np.array([[0,-1j],[-1j,0]])
def vec_mat(v):
'''
Converts a quaternion vector into the 2x2 imaginary matrix representation
'''
return v[0]*rm + v[1]*im + v[2]*jm + v[3]*km
def mat_vec(M):
'''
Converts a 2x2 imaginary matrix quaternion into its vector representation
'''
return np.array([ M[1,1].real , M[1,1].imag , M[0,1].real , -M[0,1].imag ])
def qvecmult(vec1,vec2):
'''
Multiplies two 4-vector quaternions via matrix math
'''
return mat_vec(np.dot(vec_mat(vec1),vec_mat(vec2)))
def qmatcon(M):
'''
conjugates a 2x2 imaginary matrix quaternion
'''
return vec_mat(mat_vec(M)*np.array([1,-1,-1,-1]))
def qveccon(vec):
'''
conjugates 4-vector quaternion
'''
return vec*np.array([1,-1,-1,-1])
def qvecnorm(vec):
'''
normalizes a 4-vector quaternion
'''
return vec/np.sqrt(qvecmult(qveccon(vec),vec)[0])
def qmatnorm(M):
'''
piggy-backs off the previous function to normalize 2x2 imaginary matrices
'''
return vec_mat(qvecnorm(mat_vec(M)))
def qvecmagsqr(vec):
'''
returns the magnitude squared of a 4-vector quaternion
'''
return qvecmult(qveccon(vec),vec)[0]
def qmatmagsqr(M):
'''
piggy-backs off the previous function to give the magnitude squared of 2x2 imaginary matrix
quaternions
'''
return qvecmagsqr(mat_vec(M))
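# Quick sanity check of the helpers above (illustrative addition, not part of the
# original workflow): the basis vectors should satisfy i*j = k, and a normalized
# quaternion should have unit magnitude.
_iv, _jv, _kv = np.array([0., 1, 0, 0]), np.array([0., 0, 1, 0]), np.array([0., 0, 0, 1])
assert np.allclose(qvecmult(_iv, _jv), _kv)
assert np.isclose(qvecmagsqr(qvecnorm(np.array([1., 2, 3, 4]))), 1.0)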
#------------------------------------------------------------------------------
# Defining the differential equations
# AKA "Bringing (first) order to the universe"
#------------------------------------------------------------------------------
def q1_dot(q1,q2,p1,p2,a):
'''
takes the current value of things that we know and calculates derivatives
Function assumes 2x2 complex matrices as inputs for q1,q2,p1,p2
a is the coupling constant
'''
return (p1 - a*np.dot(q1,np.dot(qmatcon(q2),p2))) \
#/(1. - qmatmagsqr(q1)*qmatmagsqr(q2)*a**2)
def p1_dot(q1,q2,q1dot,q2dot,a,w):
'''
takes the current values of things we know and the hopefully recently
calculated derivatives of q1,q2 and uses them to find other derivatives
'''
return a*np.dot(q1dot,np.dot(qmatcon(q2dot),q2)) - q1*w**2
#------------------------------------------------------------------------------
# Defining necessary constants and initial conditions
# AKA "on the first day..."
#------------------------------------------------------------------------------
w = 1. # \omega_0 in our notation
a = 0.01 # coupling constant. \alpha in our notation
print 'alpha =',a
seed = 42
np.random.seed(seed)
print 'seed =',seed
q1 = vec_mat([1,0,0,0])
q2 = vec_mat([1,0,0,0])
p1 = np.random.rand(4)
p2 = np.random.rand(4)
p1[0] = 0
p2[0] = 0
p1 = vec_mat(p1)
p2 = vec_mat(p2)
q1 = qmatnorm(q1)
q2 = qmatnorm(q2)
p1 = qmatnorm(p1)
p2 = qmatnorm(p2)
#------------------------------------------------------------------------------
# Defining loop parameters
# AKA "Configuring the space-time continuum"
#------------------------------------------------------------------------------
dt = 0.01 #time step
t = 0
print 'dt = ',dt
q1a = [mat_vec(q1)]
p1a = [mat_vec(p1)]
s1a = [mat_vec(np.dot(qmatcon(p1),q1))]
q2a = [mat_vec(q2)]
p2a = [mat_vec(p2)]
s2a = [mat_vec(np.dot(qmatcon(p2),q2))]
time = [t]
swaptime = 0.8785/a #determined 'experimentally'
#------------------------------------------------------------------------------
# Checking conserved quantity
# AKA "might as well..."
#------------------------------------------------------------------------------
con = [] #checking to see if our conserved quantity is actually conserved
def conserved(q1,q2,p1,p2):
return np.dot(qmatcon(p1),q1) + np.dot(qmatcon(p2),q2)
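# the swap dynamics should keep p1^dagger*q1 + p2^dagger*q2 fixed; comparing the
# first and last entries of s1a + s2a after the run is a quick way to verify this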
#------------------------------------------------------------------------------
# Creating the time loop
# AKA "Let 'er rip"
#------------------------------------------------------------------------------
runtime = checktime()
while t<swaptime:
'''
This integrator works on an RK4 algorithm.
    For a good explanation, see wikipedia
https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
note that the algorithm is modified slightly to fit our function
'''
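    # Classical RK4 for y' = f(y): k1 = f(y_n), k2 = f(y_n + k1*dt/2),
    # k3 = f(y_n + k2*dt/2), k4 = f(y_n + k3*dt), then
    # y_{n+1} = y_n + (k1 + 2*k2 + 2*k3 + k4)*dt/6. Note that the p-stages
    # below reuse the stage-1 q-derivatives, which is the slight modification
    # mentioned above.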
q1k1 = q1_dot(q1,q2,p1,p2,a)
q2k1 = q1_dot(q2,q1,p2,p1,a)
p1k1 = p1_dot(q1,q2,q1k1,q2k1,a,w)
p2k1 = p1_dot(q2,q1,q2k1,q1k1,a,w)
q1k2 = q1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,p1+p1k1*dt/2.,p2+p2k1*dt/2.,a)
q2k2 = q1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,p2+p2k1*dt/2.,p1+p1k1*dt/2.,a)
p1k2 = p1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,q1k1,q2k1,a,w)
p2k2 = p1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,q2k1,q1k1,a,w)
q1k3 = q1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,p1+p1k2*dt/2.,p2+p2k2*dt/2.,a)
q2k3 = q1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,p2+p2k2*dt/2.,p1+p1k2*dt/2.,a)
p1k3 = p1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,q1k1,q2k1,a,w)
p2k3 = p1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,q2k1,q1k1,a,w)
q1k4 = q1_dot(q1+q1k3*dt,q2+q2k3*dt,p1+p1k3*dt,p2+p2k3*dt,a)
q2k4 = q1_dot(q2+q2k3*dt,q1+q1k3*dt,p2+p2k3*dt,p1+p1k3*dt,a)
p1k4 = p1_dot(q1+q1k3*dt,q2+q2k3*dt,q1k1,q2k1,a,w)
p2k4 = p1_dot(q2+q2k3*dt,q1+q1k3*dt,q2k1,q1k1,a,w)
q1 += (q1k1 + 2*q1k2 + 2*q1k3 + q1k4)*dt/6.
q2 += (q2k1 + 2*q2k2 + 2*q2k3 + q2k4)*dt/6.
p1 += (p1k1 + 2*p1k2 + 2*p1k3 + p1k4)*dt/6.
p2 += (p2k1 + 2*p2k2 + 2*p2k3 + p2k4)*dt/6.
t += dt
q1a.append(mat_vec(q1))
p1a.append(mat_vec(p1))
s1a.append(mat_vec(np.dot(qmatcon(p1),q1)))
q2a.append(mat_vec(q2))
p2a.append(mat_vec(p2))
s2a.append(mat_vec(np.dot(qmatcon(p2),q2)))
time.append(t)
runtime = checktime() - runtime
q1a = np.array(q1a)
q2a = np.array(q2a)
p1a = np.array(p1a)
p2a = np.array(p2a)
s1a = np.array(s1a)
s2a = np.array(s2a)
time = np.array(time)
#------------------------------------------------------------------------------
# Plotting things
# AKA "Can we see it now?"
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
def vecplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing[:,0],label='Real', color = 'black')
plt.plot(time,thing[:,1],label='i', color = 'red')
plt.plot(time,thing[:,2],label='j', color = 'green')
plt.plot(time,thing[:,3],label='k', color = 'blue')
plt.legend(loc='best')
plt.xlim([time[0], time[-1]])
plt.grid()
plt.show()
def scalarplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing,color = 'black')
plt.grid()
plt.xlim([time[0], time[-1]])
plt.show()
vecplot(q1a,time,'$q_1$')
vecplot(q2a,time,'$q_2$')
vecplot(p1a,time,'$p_1$')
vecplot(p2a,time,'$p_2$')
vecplot(s1a,time,'$p_1^{\dagger}q_1$')
vecplot(s2a,time,'$p_2^{\dagger}q_2$')
print 'Initial:'
print 'q1 = ', q1a[0]
print 'q2 = ', q2a[0]
print 'p1 = ', p1a[0]
print 'p2 = ', p2a[0]
print 's1 = ', s1a[0]
print 's2 = ', s2a[0]
print 'Final:'
print 'q1 = ', q1a[-1]
print 'q2 = ', q2a[-1]
print 'p1 = ', p1a[-1]
print 'p2 = ', p2a[-1]
print 's1 = ', s1a[-1]
print 's2 = ', s2a[-1]
print 'runtime is',runtime, 'seconds' | mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_qt4agg.py | 10 | 2177 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import FigureCanvasQTAggBase as _FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt4agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(_FigureCanvasQTAggBase):
def __init__(self, figure):
self._agg_draw_pending = False
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasQTAggBase.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = []
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| gpl-3.0 |
nixphix/ml-projects | sentiment_analysis/twitter_sentiment_analysis-jallikattu/code/twitter_sentiment_analysis-jallikattu_FINAL.py | 1 | 11757 |
# coding: utf-8
# ### Sentiment Analysis on "Jallikattu" with Twitter Data Feed <h3 style="color:red;">#DataScienceForSocialCause</h3>
#
# Twitter is flooded with the Jallikattu issue, let us find people's sentiment with Data Science tools. Following is the approach
# * Register a Twitter API handle for data feed
# * Pull out tweets on search query 'jallikattu'
# * Using NLP packages find the sentiment of the tweet (Positive, Neutral or Negative)
# * Plot pie chart of the sentiment
# * Plot a masked word cloud of tags used
#
# Final output we expect is a masked word cloud of popular tags used in Twitter with font size proportional to the frequency of use. Let's dive in ...
# ### Loading necessary packages
#
# In particular we will be using tweepy to register an api handle with twitter and get the data feed. [Tweepy Document](http://docs.tweepy.org/en/v3.5.0/)
# TextBlob package to determine the sentiment of the tweets. [TextBlob Document](https://textblob.readthedocs.io/en/dev/)
#
#
# In[1]:
# import tweepy for twitter datastream and textblob for processing tweets
import tweepy
import textblob
# wordcloud package is used to produce the cool masked tag cloud above
from wordcloud import WordCloud
# pickle to serialize/deserialize python objects
import pickle
# regex package to extract hashtags from tweets
import re
# os for loading files from local system, matplotlib, np and PIL for plotting
from os import path
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# ### We will create a Twitter API handle for fetching data
#
# * In order to qualify for a Twitter API handle you need to be a **`Phone Verified`** Twitter user.
# 1. Goto Twitter settings page [twitter.com/settings/account](https://twitter.com/settings/account)
# 2. Choose Mobile tab on left pane, then enter your phone number and verify by OTP
# 3. Now you should be able to register new API handle for your account for `programmatic tweeting`
#
#
# * Now goto Twitter [Application Management](https://apps.twitter.com/) page
# * Click the *Create New App* button
# 1. Enter a unique App name (global namespace); you might have to try a few times to get it right
# 2. Description can be anything you wish
# 3. website can be some <yourname>.com, you don't really have to own the domain
# 4. Leave the callback URL empty, agree to the terms and conditions unconditionally
# 5. Click create
#
#
# * You can find the api credentials in the [Application Management](https://apps.twitter.com/) console
# * Choose the App and goto *keys and access tokens* tab to get API_KEY, API_SECRET, ACCESS_TOKEN and ACCESS_TOKEN_SECRET
#
# #### RUN THE CODE BLOCK BELOW ONLY ON FIRST TIME YOU CONFIGURE THE TWITTER API
# In[ ]:
# make sure to exclude this folder in git ignore
path_to_cred_file = path.abspath('../restricted/api_credentials.p')
# we will store twitter handle credentials in a pickle file (object de-serialization)
# code for pickling credentials need to be run only once during initial configuration
# fill the following dictionary with your twitter credentials
twitter_credentials = {'api_key':'API_KEY', 'api_secret':'API_SECRET', 'access_token':'ACCESS_TOKEN', 'access_token_secret':'ACCESS_TOKEN_SECRET'}
pickle.dump(twitter_credentials,open(path_to_cred_file, "wb"))
print("Pickled credentials saved to :\n"+path_to_cred_file+"\n")
print("\n".join(["{:20} : {}".format(key,value) for key,value in twitter_credentials.items()]))
# #### From the second run you can load the credentials securely from the stored file
# If you want to check the credentials uncomment the last line in below code block
# In[2]:
# make sure to exclude this folder in git ignore
path_to_cred_file = path.abspath('../restricted/api_credentials.p')
# load saved twitter credentials
twitter_credentials = pickle.load(open(path_to_cred_file,'rb'))
#print("\n".join(["{:20} : {}".format(key,value) for key,value in twitter_credentials.items()]))
# ### Creating an Open Auth Instance
# With the created api and token we will open an open auth instance to authenticate our twitter account.
#
# If you feel that your twitter api credentials have been compromised, you can just generate a new access token-secret pair; the access token works like a password that goes with your api key.
# In[3]:
# lets create an open authentication handler and initialize it with our twitter handlers api key
auth = tweepy.OAuthHandler(twitter_credentials['api_key'],twitter_credentials['api_secret'])
# access token is like password for the api key,
auth.set_access_token(twitter_credentials['access_token'],twitter_credentials['access_token_secret'])
# ### Twitter API Handle
#
# Tweepy comes with a Twitter API wrapper class called 'API', passing the open auth instance to this API creates a live Twitter handle to our account.
#
#
# * **ATTENTION: Please beware that this is a handle to your own account, not any pseudo account; if you tweet something with this it will be your tweet.** This is the reason I took care not to expose my api credentials; if you expose them, anyone can mess up your Twitter account.
#
#
# Let's open the twitter handle and print the Name and Location of the twitter account owner, you should be seeing your name.
# In[4]:
# lets create an instance of twitter api wrapper
api = tweepy.API(auth)
# lets do some self check
user = api.me()
print("{}\n{}".format(user.name,user.location))
# ### Inspiration for this Project
# I drew inspiration for this project from the ongoing issue on traditional bull fighting AKA *Jallikattu*. Here I'm trying to read the pulse of the people based on tweets.
#
# We are searching for the key word *Jallikattu* in Twitter's public tweets; from the returned search result we are taking 150 tweets to do our **Sentiment Analysis**. Please don't go for a large number of tweets, as there is an upper limit of 450 tweets; for more on api rate limits check out [Twitter Developer Doc](https://dev.twitter.com/rest/public/rate-limits).
# In[5]:
# now lets get some data to check the sentiment on it
# lets search for key word jallikattu and check the sentiment on it
query = 'jallikattu'
tweet_cnt = 150
peta_tweets = api.search(q=query,count=tweet_cnt)
# ### Processing Tweets
#
# Once we get the tweets, we will iterate through the tweets and do the following operations
# 1. Pass the tweet text to TextBlob to process the tweet
# 2. Processed tweets will have two attributes
#  * Polarity, which is a numerical value between -1 and 1; the sentiment of the text can be inferred from this.
#  * Subjectivity, which shows whether the text is stated as a fact or an opinion; the value ranges from 0 to 1
# 3. For each tweet we will find the sentiment of the text (positive, neutral or negative) and update a counter variable accordingly; this counter is later plotted as a **pie chart**.
# 4. Then we pass the tweet text to a regular expression to extract hash tags, which we later use to create an awesome **word cloud visualization**.
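# A minimal illustration of the TextBlob API used below (added for clarity; the
# example sentence is arbitrary and not part of the collected data):
demo_blob = textblob.TextBlob('Jallikattu is a great tradition')
print('polarity = {}, subjectivity = {}'.format(demo_blob.sentiment.polarity,
                                                demo_blob.sentiment.subjectivity))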
# In[6]:
# lets go over the tweets
sentiment_polarity = [0,0,0]
tags = []
for tweet in peta_tweets:
processed_tweet = textblob.TextBlob(tweet.text)
polarity = processed_tweet.sentiment.polarity
upd_index = 0 if polarity > 0 else (1 if polarity == 0 else 2)
sentiment_polarity[upd_index] = sentiment_polarity[upd_index]+1
tags.extend(re.findall(r"#(\w+)", tweet.text))
#print(tweet.text)
#print(processed_tweet.sentiment,'\n')
sentiment_label = ['Positive','Neutral','Negative']
#print("\n".join(["{:8} tweets count {}".format(s,val) for s,val in zip(sentiment_label,sentiment_polarity)]))
# plotting sentiment pie chart
colors = ['yellowgreen', 'gold', 'coral']
# lets explode the positive sentiment for visual appeal
explode = (0.1, 0, 0)
plt.pie(sentiment_polarity,labels=sentiment_label,colors=colors,explode=explode,shadow=True,autopct='%1.1f%%')
plt.axis('equal')
plt.legend(bbox_to_anchor=(1.3,1))
plt.title('Twitter Sentiment on \"'+query+'\"')
plt.show()
# ### Sentiment Analysis
#
# We can see that the majority is neutral, which is contributed by
# 1. Tweets with media only (photo, video)
# 2. Tweets in regional languages. Textblob does not work on our Indian languages.
# 3. Some tweets contain only stop words or words that do not give any positive or negative perspective.
# 4. Polarity is calculated from the number of positive words like "great, awesome, etc." or negative words like "hate, bad, etc."
#
# One more point to note is that TextBlob is not a complete NLP package; it does not do context-aware search. Such sophisticated deep learning abilities are available only with the likes of Google.
# In[7]:
# lets process the hash tags in the tweets and make a word cloud visualization
# normalizing tags by converting all tags to lowercase
tags = [t.lower() for t in tags]
# get unique count of tags to take count for each
uniq_tags = list(set(tags))
tag_count = []
# for each unique hash tag take the frequency of occurrence
for tag in uniq_tags:
tag_count.append((tag,tags.count(tag)))
# lets print the top five tags
tag_count =sorted(tag_count,key=lambda x:-x[1])[:5]
print("\n".join(["{:8} {}".format(tag,val) for tag,val in tag_count]))
# ### Simple Word Cloud with Twitter #tags
#
# Let us visualize the tags used for Jallikattu by creating a tag cloud. The wordcloud package takes a single string of tags separated by whitespace. We will concatenate the tags and pass it to the generate method to create a tag cloud image.
# In[8]:
# we will create a vivid tag cloud visualization
# creating a single string of texts from tags, the tag's font size is proportional to its frequency
text = " ".join(tags)
# this generates an image from the long string, if you wish you may save it to local
wc = WordCloud().generate(text)
# we will display the image with matplotlibs image show, removed x and y axis ticks
plt.imshow(wc)
plt.axis("off")
plt.show()
# ### Masked Word Cloud
#
# The tag cloud can be masked using a grayscale stencil image; the wordcloud package neatly arranges the words inside the mask image. I have superimposed the generated word cloud image onto the mask image to provide some detailing, otherwise the background of the word cloud would be white and it would appear like words are hanging in space instead.
#
#
# In order to make the image superimposing work well, we need to manipulate image transparency using the image alpha channel. If you look at the visual, only fine detail of the mask image is seen in the tag cloud; this is because the word cloud is laid on the mask image with 90% opacity, so only about 10% of the mask image shows through.
# In[11]:
# we can also create a masked word cloud from the tags by using grayscale image as stencil
# lets load the mask image from local
bull_mask = np.array(Image.open(path.abspath('../asset/bull_mask_1.jpg')))
wc_mask = WordCloud(background_color="white", mask=bull_mask).generate(text)
mask_image = plt.imshow(bull_mask, cmap=plt.cm.gray)
word_cloud = plt.imshow(wc_mask,alpha=0.9)
plt.axis("off")
plt.title("Twitter Hash Tag Word Cloud for "+query)
plt.show()
# The tag cloud marks the key moments like the call for protest in Chennai Marina and Alanganallur. It also shows a leading actor's support for the cause and calls for a ban on PETA.
#
# This code will give different output over time as new tweets are added to the timeline and old ones are pushed down.
# #### Thank you for showing interest in my work
# If you liked it and want to be notified of my future work, follow me on
#
#
# [Knowme](https://knome.ultimatix.net/users/286632-prabakaran-k)
#
#
# [@iPrabakaran](https://twitter.com/iPrabakaran) Twitter
#
#
# [GitHub](https://github.com/nixphix)
| mit |
pdamodaran/yellowbrick | tests/checks.py | 1 | 4707 | # tests.checks
# Performs checking that visualizers adhere to Yellowbrick conventions.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon May 22 11:18:06 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: checks.py [4131cb1] benjamin@bengfort.com $
"""
Performs checking that visualizers adhere to Yellowbrick conventions.
"""
##########################################################################
## Imports
##########################################################################
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.base import ModelVisualizer, ScoreVisualizer
from yellowbrick.classifier.base import ClassificationScoreVisualizer
from yellowbrick.cluster.base import ClusteringScoreVisualizer
from yellowbrick.features.base import FeatureVisualizer, DataVisualizer
from yellowbrick.regressor.base import RegressionScoreVisualizer
from yellowbrick.text.base import TextVisualizer
##########################################################################
## Checking runnable
##########################################################################
def check_visualizer(Visualizer):
"""
Check if visualizer adheres to Yellowbrick conventions.
This function runs an extensive test-suite for input validation, return
values, exception handling, and more. Additional tests for scoring or
    tuning visualizers will be run if the Visualizer class inherits from the
corresponding object.
"""
name = Visualizer.__name__
for check in _yield_all_checks(name, Visualizer):
check(name, Visualizer)
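# Example usage (mirroring the __main__ block below): check_visualizer(ClassBalance)
# would run every applicable check for that visualizer.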
##########################################################################
## Generate the specific per-visualizer checking
##########################################################################
def _yield_all_checks(name, Visualizer):
"""
Composes the checks required for the specific visualizer.
"""
# Global Checks
yield check_instantiation
yield check_estimator_api
# Visualizer Type Checks
if issubclass(Visualizer, RegressionScoreVisualizer):
for check in _yield_regressor_checks(name, Visualizer):
yield check
if issubclass(Visualizer, ClassificationScoreVisualizer):
for check in _yield_classifier_checks(name, Visualizer):
yield check
if issubclass(Visualizer, ClusteringScoreVisualizer):
for check in _yield_clustering_checks(name, Visualizer):
yield check
if issubclass(Visualizer, FeatureVisualizer):
for check in _yield_feature_checks(name, Visualizer):
yield check
if issubclass(Visualizer, TextVisualizer):
for check in _yield_text_checks(name, Visualizer):
yield check
# Other checks
def _yield_regressor_checks(name, Visualizer):
"""
Checks for regressor visualizers
"""
pass
def _yield_classifier_checks(name, Visualizer):
"""
Checks for classifier visualizers
"""
pass
def _yield_clustering_checks(name, Visualizer):
"""
Checks for clustering visualizers
"""
pass
def _yield_feature_checks(name, Visualizer):
"""
Checks for feature visualizers
"""
pass
def _yield_text_checks(name, Visualizer):
"""
Checks for text visualizers
"""
pass
##########################################################################
## Checking Functions
##########################################################################
def check_instantiation(name, Visualizer, args=(), kwargs=None):
    # assert that visualizers can be passed an axes object.
    kwargs = kwargs or {}
    ax = plt.gca()
    viz = Visualizer(*args, **kwargs)
    assert viz.ax == ax
def check_estimator_api(name, Visualizer):
    X = np.random.rand(10, 5)
    y = np.random.randint(0, 2, 10)
# Ensure fit returns self.
viz = Visualizer()
self = viz.fit(X, y)
assert viz == self
if __name__ == '__main__':
import sys
sys.path.append("..")
from yellowbrick.classifier import *
from yellowbrick.cluster import *
from yellowbrick.features import *
from yellowbrick.regressor import *
from yellowbrick.text import *
visualizers = [
ClassBalance, ClassificationReport, ConfusionMatrix, ROCAUC,
KElbowVisualizer, SilhouetteVisualizer,
ScatterVisualizer, JointPlotVisualizer, Rank2D, RadViz, ParallelCoordinates,
AlphaSelection, ManualAlphaSelection,
PredictionError, ResidualsPlot,
TSNEVisualizer, FreqDistVisualizer, PosTagVisualizer
]
for visualizer in visualizers:
check_visualizer(visualizer)
| apache-2.0 |
hakonsbm/nest-simulator | extras/ConnPlotter/examples/connplotter_tutorial.py | 18 | 27730 | # -*- coding: utf-8 -*-
#
# connplotter_tutorial.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# !========================
# ! ConnPlotter: A Tutorial
# !========================
# !
# ! :Author: Hans Ekkehard Plesser
# ! :Institution: Norwegian University of Life Sciences, Simula
# ! Research Laboratory, RIKEN Brain Sciences Institute
# ! :Version: 0.7
# ! :Date: 1 December 2009
# ! :Copyright: Hans Ekkehard Plesser
# ! :License: Creative Commons Attribution-Noncommercial-Share Alike License
# ! v 3.0
# !
# ! :Note: For best results, you should run this script with PyReport by
# ! Gael Varoquaux, available from
# ! http://gael-varoquaux.info/computers/pyreport/
# !
# ! Please set using_pyreport to True if you want to run the
# ! script through pyreport. Otherwise, figures will not be captured
# ! correctly.
using_pyreport = False
# ! Introduction
# !=============
# ! This tutorial gives a brief introduction to the ConnPlotter
# ! toolbox. It is by no means complete.
# ! Avoid interactive backend when using pyreport
if using_pyreport:
import matplotlib
matplotlib.use("Agg")
# ! Import pylab to call pylab.show() so that pyreport
# ! can capture figures created. Must come before import
# ! ConnPlotter so we get the correct show().
import pylab
# ! If not using pyreport, disable pylab.show() until we reach end of script
if not using_pyreport:
pylab_show = pylab.show
def nop(s=None):
pass
pylab.show = nop
# ! Import ConnPlotter and its examples
import ConnPlotter as cpl
import ConnPlotter.examples as ex
# ! Turn off warnings about resized figure windows
import warnings
warnings.simplefilter("ignore")
# ! Define a helper function to show LaTeX tables on the fly
def showTextTable(connPattern, fileTrunk):
"""
Shows a Table of Connectivity as textual table.
Arguments:
connPattern ConnectionPattern instance
fileTrunk Eventual PNG image will be fileTrunk.png
"""
import subprocess as subp # to call LaTeX etc
import os # to remove files
# Write to LaTeX file so we get a nice textual representation
# We want a complete LaTeX document, so we set ``standalone``
# to ``True``.
connPattern.toLaTeX(file=fileTrunk + '.tex', standalone=True,
enumerate=True)
# Create PDF, crop, and convert to PNG
try:
devnull = open('/dev/null', 'w')
subp.call(['pdflatex', fileTrunk], stdout=devnull, stderr=subp.STDOUT)
# need wrapper, since pdfcrop does not begin with #!
subp.call(['pdfcrop ' + fileTrunk + '.pdf ' + fileTrunk + '-crop.pdf'],
shell=True,
stdout=devnull, stderr=subp.STDOUT)
devnull.close()
os.rename(fileTrunk + '-crop.pdf', fileTrunk + '.pdf')
for suffix in ('.tex', '-crop.pdf', '.png', '.aux', '.log'):
if os.path.exists(fileTrunk + suffix):
os.remove(fileTrunk + suffix)
except:
raise Exception('Could not create PDF Table.')
# ! Simple network
# ! ==============
# ! This is a simple network with two layers A and B; layer B has two
# ! populations, E and I. On the NEST side, we use only synapse type
# ! ``static_synapse``. ConnPlotter then infers that synapses with positive
# ! weights should have type ``exc``, those with negative weight type ``inh``.
# ! Those two types are know to ConnPlotter.
# ! Obtain layer, connection and model list from the example set
s_layer, s_conn, s_model = ex.simple()
# ! Create Connection Pattern representation
s_cp = cpl.ConnectionPattern(s_layer, s_conn)
# ! Show pattern as textual table (we cheat a little and include PDF directly)
showTextTable(s_cp, 'simple_tt')
# $ \centerline{\includegraphics{simple_tt.pdf}}
# ! Show pattern in full detail
# ! ---------------------------
# ! A separate patch is shown for each pair of populations.
# !
# ! - Rows represent senders, columns targets.
# ! - Layer names are given to the left/above, population names to the right
# ! and below.
# ! - Excitatory synapses shown in blue, inhibitory in red.
# ! - Each patch has its own color scale.
s_cp.plot()
pylab.show()
# ! Let us take a look at what this connection pattern table shows:
# !
# ! - The left column, with header "A", is empty: The "A" layer receives
# ! no input.
# ! - The right column shows input to layer "B"
# !
# ! * The top row, labeled "A", has two patches in the "B" column:
# !
# ! + The left patch shows relatively focused input to the "E" population
# ! in layer "B" (first row of "Connectivity" table).
# ! + The right patch shows wider input to the "I" population in layer
# ! "B" (second row of "Connectivity" table).
# ! + Patches are red, indicating excitatory connections.
# ! + In both cases, mask are circular, and the product of connection
# ! weight and probability is independent of the distance between sender
# ! and target neuron.
# !
# ! * The grey rectangle to the bottom right shows all connections from
# ! layer "B" populations to layer "B" populations. It is subdivided into
# ! two rows and two columns:
# !
# ! + Left column: inputs to the "E" population.
# ! + Right column: inputs to the "I" population.
# ! + Top row: projections from the "E" population.
# ! + Bottom row: projections from the "I" population.
# ! + There is only one type of synapse for each sender-target pair,
# ! so there is only a single patch per pair.
# ! + Patches in the top row, from population "E" show excitatory
# ! connections, thus they are red.
# ! + Patches in the bottom row, from population "I" show inhibitory
# ! connections, thus they are blue.
# ! + The patches in detail are:
# !
# ! - **E to E** (top-left, row 3+4 in table): two rectangular
# ! projections at 90 degrees.
# ! - **E to I** (top-right, row 5 in table): narrow gaussian projection.
# ! - **I to E** (bottom-left, row 6 in table): wider gaussian projection
# ! - **I to I** (bottom-right, row 7 in table): circular projection
# ! covering entire layer.
# !
# ! - **NB:** Color scales are different, so one **cannot** compare connection
# ! strengths between patches.
# ! Full detail, common color scale
# ! -------------------------------
s_cp.plot(globalColors=True)
pylab.show()
# ! This figure shows the same data as the one above, but now all patches use
# ! a common color scale, so full intensity color (either red or blue)
# ! indicates the strongest connectivity. From this we see that
# !
# ! - A to B/E is stronger than A to B/I
# ! - B/E to B/I is the strongest of all connections at the center
# ! - B/I to B/E is stronger than B/I to B/I
# ! Aggregate by groups
# ! -------------------
# ! For each pair of population groups, sum connections of the same type
# ! across populations.
s_cp.plot(aggrGroups=True)
pylab.show()
# ! In the figure above, all excitatory connections from B to B layer have been
# ! combined into one patch, as have all inhibitory connections from B to B.
# ! In the upper-right corner, all connections from layer A to layer B have
# ! been combined; the patch for inhibitory connections is missing, as there
# ! are none.
# ! Aggregate by groups and synapse models
# ! --------------------------------------
s_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! When aggregating across synapse models, excitatory and inhibitory
# ! connections are combined. By default, excitatory connections are weights
# ! with +1, inhibitory connections with -1 in the sum. This may yield kernels
# ! with positive and negative values. They are shown on a red-white-blue scale
# ! as follows:
# !
# ! - White always represents 0.
# ! - Positive values are represented by increasingly saturated red.
# ! - Negative values are represented by increasingly saturated blue.
# ! - Colorscales are separate for red and blue:
# !
# ! * largest positive value: fully saturated red
# ! * largest negative value: fully saturated blue
# !
# ! - Each patch has its own colorscales.
# ! - When ``aggrSyns=True`` is combined with ``globalColors=True``,
# ! all patches use the same minimum and maximum in their red and blue
# !   color scales. The minimum is the negative of the maximum, so that
# !   blue and red intensities can be compared.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True)
pylab.show()
# ! - We can explicitly set the limits of the color scale; if values exceeding
# ! the limits are present, this is indicated by an arrowhead at the end of
# ! the colorbar. User-defined color limits need not be symmetric about 0.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True,
colorLimits=[-2, 3])
pylab.show()
# ! Save pattern to file
# ! --------------------
# s_cp.plot(file='simple_example.png')
# ! This saves the detailed diagram to the given file. If you want to save
# ! the pattern in several file formats, you can pass a tuple of file names,
# ! e.g., ``s_cp.plot(file=('a.eps', 'a.png'))``.
# !
# ! **NB:** Saving directly to PDF may lead to files with artifacts. We
# ! recommend to save to EPS and the convert to PDF.
# ! Build network in NEST
# ! ---------------------
import nest
import nest.topology as topo
# ! Create models
for model in s_model:
nest.CopyModel(model[0], model[1], model[2])
# ! Create layers, store layer info in Python variable
for layer in s_layer:
exec ('%s = topo.CreateLayer(layer[1])' % layer[0])
# ! Create connections, need to insert variable names
for conn in s_conn:
eval('topo.ConnectLayers(%s,%s,conn[2])' % (conn[0], conn[1]))
nest.Simulate(10)
# ! **Ooops:** Nothing happened? Well, it did, but pyreport cannot capture the
# ! output directly generated by NEST. The absence of an error message in this
# ! place shows that network construction and simulation went through.
# ! Inspecting the connections actually created
# ! :::::::::::::::::::::::::::::::::::::::::::
# ! The following block of messy and makeshift code plots the targets of the
# ! center neuron of the B/E population in the B/E and the B/I populations.
B_top = nest.GetStatus(B, 'topology')[0]
ctr_id = topo.GetElement(B,
[int(B_top['rows'] / 2), int(B_top['columns'] / 2)])
# find excitatory element in B
E_id = [gid for gid in ctr_id
if nest.GetStatus([gid], 'model')[0] == 'E']
# get all targets, split into excitatory and inhibitory
alltgts = nest.GetStatus(
nest.GetConnections(E_id, synapse_model='static_synapse'), 'target')
Etgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'E']
Itgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'I']
# obtain positions of targets
Etpos = tuple(zip(*topo.GetPosition(Etgts)))
Itpos = tuple(zip(*topo.GetPosition(Itgts)))
# plot excitatory
pylab.clf()
pylab.subplot(121)
pylab.scatter(Etpos[0], Etpos[1])
ctrpos = pylab.array(topo.GetPosition(E_id)[0])
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.4, -0.2)), 0.8, 0.4, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.2, -0.4)), 0.4, 0.8, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.5, -0.5)), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
# plot inhibitory
pylab.subplot(122)
pylab.scatter(Itpos[0], Itpos[1])
ctrpos = topo.GetPosition(E_id)[0]
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.2, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.3, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.5, zorder=2,
fc='none', ec='r', lw=3))
ax.add_patch(pylab.Rectangle((-0.5, -0.5), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
pylab.show()
# ! Thick red lines mark the mask, dashed red lines to the right one, two and
# ! three standard deviations. The sender location is marked by the red spot
# ! in the center. Layers are 40x40 in size.
# ! A more complex network
# ! ======================
# !
# ! This network has layers A and B, with E and I populations in B. The added
# ! complexity comes from the fact that we now have four synapse types: AMPA,
# ! NMDA, GABA_A and GABA_B. These synapse types are known to ConnPlotter.
# ! Setup and tabular display
c_layer, c_conn, c_model = ex.complex()
c_cp = cpl.ConnectionPattern(c_layer, c_conn)
showTextTable(c_cp, 'complex_tt')
# $ \centerline{\includegraphics{complex_tt.pdf}}
# ! Pattern in full detail
# ! ----------------------
c_cp.plot()
pylab.show()
# ! Note the following differences to the simple pattern case:
# !
# ! - For each pair of populations, e.g., B/E as sender and B/E as target,
# ! we now have two patches representing AMPA and NMDA synapse for the E
# ! population, GABA_A and _B for the I population.
# ! - Colors are as follows:
# !
# ! :AMPA: red
# ! :NMDA: orange
# ! :GABA_A: blue
# ! :GABA_B: purple
# ! - Note that the horizontal rectangular pattern (table line 3) describes
# ! AMPA synapses, while the vertical rectangular pattern (table line 4)
# ! describes NMDA synapses.
# ! Full detail, common color scale
# ! -------------------------------
c_cp.plot(globalColors=True)
pylab.show()
# ! As above, but now with a common color scale.
# ! **NB:** The patch for the B/I to B/I connection may look empty, but it
# ! actually shows a very light shade of red. Rules are as follows:
# !
# ! - If there is no connection between two populations, show the grey layer
# ! background.
# ! - All parts of the target layer that are outside the mask or strictly zero
# ! are off-white.
# ! - If it looks bright white, it is a very diluted shade of the color for the
# !   pertaining synapse type.
# ! Full detail, explicit color limits
# ! ----------------------------------
c_cp.plot(colorLimits=[0, 1])
pylab.show()
# ! As above, but the common color scale is now given explicitly.
# ! The arrow at the right end of the color scale indicates that the values
# ! in the kernels extend beyond +1.
# ! Aggregate by synapse models
# ! -----------------------------
# ! For each population pair, connections are summed across
# ! synapse models.
# !
# ! - Excitatory kernels are weighted with +1, inhibitory kernels with -1.
# ! - The resulting kernels are shown on a color scale ranging from red
# ! (inhibitory) via white (zero) to blue (excitatory).
# ! - Each patch has its own color scale
c_cp.plot(aggrSyns=True)
pylab.show()
# !
# ! - AMPA and NMDA connections from B/E to B/E are now combined to form a
# ! cross.
# ! - GABA_A and GABA_B connections from B/I to B/E are two concentric spots.
# ! Aggregate by population group
# ! ------------------------------
c_cp.plot(aggrGroups=True)
pylab.show()
# ! This is in many ways orthogonal to aggregation by synapse model:
# ! We keep synapse types separate, while we combine across populations. Thus,
# ! we have added the horizontal bar (B/E to B/E, row 3) with the spot
# ! (B/E to B/I, row 5).
# ! Aggregate by population group and synapse model
# ! -----------------------------------------------------------------
c_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! All connections are combined for each pair of sender/target layers.
# ! CPTs using the total charge deposited (TCD) as intensity
# ! -----------------------------------------------------------
# ! TCD-based CPTs are currently only available for the ht_neuron, since
# ! ConnPlotter does not know how to obtain \int g(t) dt from NEST for other
# ! conductance-based model neurons.
# ! We need to create a separate ConnectionPattern instance for each membrane
# ! potential we want to use in the TCD computation
c_cp_75 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-75.0)
c_cp_45 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-45.0)
# ! In order to obtain a meaningful comparison between both membrane
# ! potentials, we use the same global color scale.
# ! V_m = -75 mV
# ! ::::::::::::::
c_cp_75.plot(colorLimits=[0, 150])
pylab.show()
# ! V_m = -45 mV
# ! ::::::::::::::
c_cp_45.plot(colorLimits=[0, 150])
pylab.show()
# ! Note that the NMDA projection virtually vanishes for V_m=-75mV, but is very
# ! strong for V_m=-45mV. GABA_A and GABA_B projections are also stronger,
# ! while AMPA is weaker for V_m=-45mV.
# ! Non-Dale network model
# ! ======================
# ! By default, ConnPlotter assumes that networks follow Dale's law, i.e.,
# ! either make excitatory or inhibitory connections. If this assumption
# ! is violated, we need to inform ConnPlotter how synapse types are grouped.
# ! We look at a simple example here.
# ! Load model
nd_layer, nd_conn, nd_model = ex.non_dale()
# ! We specify the synapse configuration using the synTypes argument:
# !
# ! - synTypes is a tuple.
# ! - Each element in the tuple represents a group of synapse models
# ! - Any sender can make connections with synapses from **one group only**.
# ! - Each synapse model is specified by a ``SynType``.
# ! - The SynType constructor takes three arguments:
# !
# ! * The synapse model name
# ! * The weight to apply then aggregating across synapse models
# ! * The color to use for the synapse type
# !
# ! - Synapse names must be unique, and must form a superset of all synapse
# ! models in the network.
nd_cp = cpl.ConnectionPattern(nd_layer, nd_conn, synTypes=(
(cpl.SynType('exc', 1.0, 'b'), cpl.SynType('inh', -1.0, 'r')),))
showTextTable(nd_cp, 'non_dale_tt')
# $ \centerline{\includegraphics{non_dale_tt.pdf}}
nd_cp.plot()
pylab.show()
# ! Note that we now have red and blue patches side by side, as the same
# ! population can make excitatory and inhibitory connections.
# ! Configuring the ConnectionPattern display
# ! =========================================
# ! I will now show you a few ways in which you can configure how ConnPlotter
# ! shows connection patterns.
# ! User defined synapse types
# ! --------------------------
# !
# ! By default, ConnPlotter knows two following sets of synapse types.
# !
# ! exc/inh
# ! - Used automatically when all connections have the same synapse_model.
# ! - Connections with positive weight are assigned model exc, those with
# ! negative weight model inh.
# ! - When computing totals, exc has weight +1, inh weight -1
# ! - Exc is colored blue, inh red.
# !
# ! AMPA/NMDA/GABA_A/GABA_B
# ! - Used if the set of ``synapse_model`` s in the network is a subset of
# ! those four types.
# ! - AMPA/NMDA carry weight +1, GABA_A/GABA_B weight -1.
# ! - Colors are as follows:
# !
# ! :AMPA: blue
# ! :NMDA: green
# ! :GABA_A: red
# ! :GABA_B: magenta
# !
# !
# ! We saw a first example of user-defined synapse types in the non-Dale
# ! example above. In that case, we only changed the grouping. Here, I will
# ! demonstrate the effect of different ordering, weighting, and color
# ! specifications. We use the complex model from above as example.
# !
# ! *NOTE*: It is most likely a *bad idea* to change the colors or placement of
# ! synapse types. If everyone uses the same design rules, we will all be able
# ! to read each other's figures much more easily.
# ! Placement of synapse types
# ! ::::::::::::::::::::::::::
# !
# ! The ``synTypes`` nested tuple defines the placement of patches for
# ! different synapse models. Default layout is
# !
# ! ====== ======
# ! AMPA NMDA
# ! GABA_A GABA_B
# ! ====== ======
# !
# ! All four matrix elements are shown in this layout only when using
# ! ``mode='layer'`` display. Otherwise, one or the other row is shown.
# ! Note that synapses that can arise from a layer simultaneously, must
# ! always be placed on one matrix row, i.e., in one group. As an example,
# ! we now invert placement, without any other changes:
cinv_syns = ((cpl.SynType('GABA_B', -1, 'm'), cpl.SynType('GABA_A', -1, 'r')),
(cpl.SynType('NMDA', 1, 'g'), cpl.SynType('AMPA', 1, 'b')))
cinv_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cinv_syns)
cinv_cp.plot()
pylab.show()
# ! Notice that on each row the synapses are exchanged compared to the original
# ! figure above. When displaying by layer, also the rows have traded place:
cinv_cp.plot(aggrGroups=True)
pylab.show()
# ! Totals are not affected:
cinv_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Weighting of synapse types in ``totals`` mode
# ! :::::::::::::::::::::::::::::::::::::::::::::
# !
# ! Different synapses may have quite different efficacies, so weighting them
# ! all with +-1 when computing totals may give a wrong impression. Different
# ! weights can be supplied as second argument to SynTypes(). We return to the
# ! normal placement of synapses and
# ! create two examples with very different weights:
cw1_syns = ((cpl.SynType('AMPA', 10, 'b'), cpl.SynType('NMDA', 1, 'g')),
(cpl.SynType('GABA_A', -2, 'g'), cpl.SynType('GABA_B', -10, 'b')))
cw1_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw1_syns)
cw2_syns = ((cpl.SynType('AMPA', 1, 'b'), cpl.SynType('NMDA', 10, 'g')),
(cpl.SynType('GABA_A', -20, 'g'), cpl.SynType('GABA_B', -1, 'b')))
cw2_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw2_syns)
# ! We first plot them both in population mode
cw1_cp.plot(aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrSyns=True)
pylab.show()
# ! Finally, we plot them aggregating across groups and synapse models
cw1_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Alternative colors for synapse patches
# ! ::::::::::::::::::::::::::::::::::::::
# ! Different colors can be specified using any legal color specification.
# ! Colors should be saturated, as they will be mixed with white. You may
# ! also provide a colormap explicitly. For this example, we use once more
# ! normal placement and weights. As all synapse types are shown in layer
# ! mode, we use that mode for display here.
cc_syns = (
(cpl.SynType('AMPA', 1, 'maroon'), cpl.SynType('NMDA', 1, (0.9, 0.5, 0))),
(cpl.SynType('GABA_A', -1, '0.7'), cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
cc_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cc_syns)
cc_cp.plot(aggrGroups=True)
pylab.show()
# ! We get the following colors:
# !
# ! AMPA brownish
# ! NMDA golden orange
# ! GABA_A grey
# ! GABA_B hsv colormap
# !
# ! **NB:** When passing an explicit colormap, parts outside the mask will be
# ! shown to the "bad" color of the colormap, usually the "bottom" color in the
# ! map. To let points outside the mask appear in white, set the bad color of
# ! the colormap; unfortunately, this modifies the colormap.
pylab.cm.hsv.set_bad(cpl.colormaps.bad_color)
ccb_syns = (
(cpl.SynType('AMPA', 1, 'maroon'),
cpl.SynType('NMDA', 1, (0.9, 0.5, 0.1))),
(cpl.SynType('GABA_A', -1, '0.7'),
cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
ccb_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=ccb_syns)
ccb_cp.plot(aggrGroups=True)
pylab.show()
# ! Other configuration options
# ! ---------------------------
# !
# ! Some more adjustments are possible by setting certain module properties.
# ! Some of these need to be set before ConnectionPattern() is constructed.
# !
# ! Background color for masked parts of each patch
cpl.colormaps.bad_color = 'cyan'
# ! Background for layers
cpl.plotParams.layer_bg = (0.8, 0.8, 0.0)
# ! Resolution for patch computation
cpl.plotParams.n_kern = 5
# ! Physical size of patches: longest edge of largest patch, in mm
cpl.plotParams.patch_size = 40
# ! Margins around the figure (excluding labels)
cpl.plotParams.margins.left = 40
cpl.plotParams.margins.top = 30
cpl.plotParams.margins.bottom = 15
cpl.plotParams.margins.right = 30
# ! Fonts for layer and population labels
import matplotlib.font_manager as fmgr
cpl.plotParams.layer_font = fmgr.FontProperties(family='serif', weight='bold',
size='xx-large')
cpl.plotParams.pop_font = fmgr.FontProperties('small')
# ! Orientation for layer and population label
cpl.plotParams.layer_orientation = {'sender': 'vertical', 'target': 60}
cpl.plotParams.pop_orientation = {'sender': 'horizontal', 'target': -45}
# ! Font for legend titles and ticks, tick placement, and tick format
cpl.plotParams.legend_title_font = fmgr.FontProperties(family='serif',
weight='bold',
size='large')
cpl.plotParams.legend_tick_font = fmgr.FontProperties(family='sans-serif',
weight='light',
size='xx-small')
cpl.plotParams.legend_ticks = [0, 1, 2]
cpl.plotParams.legend_tick_format = '%.1f pA'
cx_cp = cpl.ConnectionPattern(c_layer, c_conn)
cx_cp.plot(colorLimits=[0, 2])
pylab.show()
# ! Several more options are available to control the format of the color bars
# ! (they all are members of plotParams):
# ! * legend_location : if 'top', place synapse name atop color bar
# ! * margins.colbar : height of lower margin set aside for color bar, in mm
# ! * cbheight : height of single color bar relative to margins.colbar
# ! * cbwidth : width of single color bar relative to figure width
# ! * cbspace : spacing between color bars, relative to figure width
# ! * cboffset : offset of first color bar from left margin, relative to
# ! figure width
# ! You can also specify the width of the final figure, but this may not work
# ! well with on-screen display or here in pyreport. Width is in mm.
# ! Note that left and right margin combined are 70mm wide, so only 50mm are
# ! left for the actual CPT.
cx_cp.plot(fixedWidth=120)
pylab.show()
# ! If not using pyreport, we finally show and block
if not using_pyreport:
print("")
print("The connplotter_tutorial script is done. " +
"Call pylab.show() and enjoy the figures!")
print(
"You may need to close all figures manually " +
"to get the Python prompt back.")
print("")
pylab.show = pylab_show
| gpl-2.0 |
comprna/SUPPA | scripts/generate_boxplot_event.py | 1 | 5584 | # This script plots the PSI values of a given event as a boxplot,
# grouping the samples by condition (see the module docstring below)
"""
@authors: Juan L. Trincado
@email: juanluis.trincado@upf.edu
generate_boxplot_event.py: Generates a boxplot with the PSI values, given which samples are in which conditions
"""
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import re
from argparse import ArgumentParser, RawTextHelpFormatter
description = \
    "Description:\n\n" + \
    "This script accepts a PSI table (events) and the ranges of columns\n" + \
    "belonging to each condition, and generates a boxplot of the PSI values\n" + \
    "of a given event across those conditions"
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter,
add_help=True)
parser.add_argument("-i", "--input", required=True,
help="Input file")
parser.add_argument("-e", "--event", required=True, type=str,
help="Event to plot")
parser.add_argument('-g', '--groups',
action="store",
required=True,
type=str,
nargs="*",
help="Ranges of column numbers specifying the replicates per condition. "
"Column numbers have to be continuous, with no overlapping or missing columns between them. "
"Ex: 1-3,4-6")
parser.add_argument('-c', '--conds',
action="store",
required=False,
default="0",
type=str,
nargs="*",
help="Name of each one of the conditions. Ex: Mutated,Non_mutated")
parser.add_argument("-o", "--output", required=True,
help="Output path")
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def main():
args = parser.parse_args()
input_file = args.input
event = args.event
groups = re.findall(r"[\w]+", args.groups[0])
output_path = args.output
# input_file = "/home/juanluis/Desktop/Work/Master_class/events.psi"
# event = "ENSG00000149554;SE:chr11:125496728-125497502:125497725-125499127:+"
# groups = ['1','3','4','6']
# output_path = "/home/juanluis/Desktop/Work/Master_class/"
try:
logger.info("Reading input file...")
dict_PSI = {}
cond = 1
success = False
file = open(input_file)
for line in file:
tokens = line.rstrip().split("\t")
if (tokens[0]==event):
success = True
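                # groups is a flat list of (start, end) column pairs, e.g. ['1','3','4','6'];
                # walk it two entries at a time, one pair per condition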
for i,x in enumerate(groups):
if(i%2==1):
continue
PSI = []
samples = range(int(groups[i]),int(groups[i+1])+1)
#Get the PSI of this group of samples
for j in samples:
PSI.append(tokens[j])
dict_PSI[cond] = PSI
cond = cond + 1
break
if(success):
#Create the boxplot
data_to_plot = []
for key in dict_PSI.keys():
data_to_plot.append(list(map(float,dict_PSI[key])))
# Create a figure instance
fig = plt.figure(figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot, patch_artist=True, sym='')
# change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='.', color='#000000', alpha=0.7)
# Assign different colors
colors = ['lightblue', 'pink']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
for j in range(len(data_to_plot)):
y = data_to_plot[j]
x = np.random.normal(1 + j, 0.02, size=len(y))
plt.plot(x, y, 'ko', alpha=0.5)
# Custom x-axis labels if the user has input conditions
if (args.conds != "0"):
conditions = re.findall(r"[\w]+", args.conds[0])
ax.set_xticklabels(conditions)
# Leave just ticks in the bottom
ax.get_xaxis().tick_bottom()
ax.set_ylabel('PSI')
# Set the title
title = "Event: " + event
ax.set_title(title, fontsize=10)
# Add a horizontal grid to the plot,
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Set the limits for the y axes
ax.set_ylim([-0.05, 1.05])
# Save the figure
output_path = output_path + "/" + event + ".png"
logger.info("Created " + output_path)
fig.savefig(output_path, bbox_inches='tight')
else:
logger.info("Event not found.")
logger.info("Done.")
exit(0)
except Exception as error:
logger.error(repr(error))
logger.error("Aborting execution")
sys.exit(1)
if __name__ == '__main__':
main() | mit |
aflaxman/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 33 | 3875 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for Bayesian Ridge Regression
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot true weights, estimated weights, histogram of the weights, and
# predictions with standard deviations
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True,
edgecolor='black')
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=0.1)
clf_poly = BayesianRidge()
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
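# return_std=True also returns the predictive standard deviation, which is
# plotted below as error bars around the predicted mean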
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial Bayesian Ridge Regression", linewidth=lw)
plt.plot(X_plot, y_plot, color='gold', linewidth=lw,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| bsd-3-clause |
iohannez/gnuradio | gr-filter/examples/synth_to_chan.py | 7 | 3891 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps) / nchans))
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
tb = gr.top_block()
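    # Flowgraph: each tone is FM modulated and fed into one input of the PFB
    # synthesizer; the synthesized wideband signal (plus noise) is then both
    # split back into channels by the PFB channelizer and recorded directly.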
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in range(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = numpy.blackman
#winfunc = numpy.hamming
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pyplot.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
breisfeld/avoplot | examples/adv_sine_wave.py | 3 | 8650 | import numpy
import matplotlib.pyplot as plt
import math
from avoplot import plugins, series, controls, subplots
from avoplot.gui import widgets
import wx
plugin_is_GPL_compatible = True
class TrigFuncSubplot(subplots.AvoPlotXYSubplot):
def my_init(self):
"""
When defining your own subplot classes, you should not need to override
the __init__ method of the base class. Instead you should define a
my_init() method which takes no args. This will be called automatically
when the subplot is created. Use this to customise the subplot to suit
your specific needs - settings titles, axis formatters etc.
"""
#call the parent class's my_init() method. This is not required, unless
#you want to make use of any customisation done by the parent class.
#Note that this includes any control panels defined by the parent class!
super(TrigFuncSubplot, self).my_init()
#set up some axis titles
ax = self.get_mpl_axes()
ax.set_xlabel(r'$\theta$ (radians)')
ax.set_ylabel('y')
#add the units control panel to this subplot to allow the user to change
#the x-axis units.
self.add_control_panel(TrigSubplotUnitsCtrl(self))
#set the initial name of the subplot
self.set_name("Trig. Function Subplot")
class SineWaveSeries(series.XYDataSeries):
"""
Define our own data series type for Sine data. Unlike for subplots, when
defining custom data series, we do override the __init__ method.
"""
def __init__(self, *args, **kwargs):
super(SineWaveSeries, self).__init__(*args, **kwargs)
#add a control for this data series to allow the user to change the
#frequency of the wave using a slider.
self.add_control_panel(SineWaveFreqCtrl(self))
@staticmethod
def get_supported_subplot_type():
"""
This is how we restrict which data series can be plotted into which
types of subplots. Specialised subplots may provide controls for dealing
with very specific types of data - for example, our TrigFuncSubplot
allows the x-axis to be switched between degrees and radians, it would
therefore make no sense to allow time series data to be plotted into it.
However, it might make sense to allow a SineWaveSeries to be plotted
into a general AvoPlotXYSuplot, and therefore this is permitted by
AvoPlot. The rule is as follows:
A data series may be plotted into a subplot if the subplot is an
instance of the class returned by its get_supported_subplot_type()
method or any of its base classes.
"""
return TrigFuncSubplot
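# Illustrative sketch only (this helper is not part of AvoPlot's API): the
# compatibility rule described above boils down to an isinstance() check
# against the subplot type advertised by the series class.
def _series_fits_subplot(series_cls, subplot):
    # True if `subplot` is an instance of (a subclass of) the subplot type
    # that `series_cls` declares via get_supported_subplot_type().
    return isinstance(subplot, series_cls.get_supported_subplot_type())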
class AdvExamplePlugin(plugins.AvoPlotPluginSimple):
"""
This class is the same as that used for the Sine wave example, except
that we use the SineWaveSeries data series class that we defined above
rather than the generic XYDataSeries class used before.
"""
def __init__(self):
super(AdvExamplePlugin, self).__init__("Example Plugin with Controls",
SineWaveSeries)
self.set_menu_entry(['Examples', 'Adv. Sine Wave'],
"Plot a sine wave with variable frequency")
def plot_into_subplot(self, subplot):
x_data = numpy.linspace(0, 7, 500)
y_data = numpy.sin(x_data)
data_series = SineWaveSeries("adv sine wave", xdata=x_data,
ydata=y_data)
subplot.add_data_series(data_series)
return True
def rad2deg(theta, pos):
"""
Function for converting radians to degrees for use with matplotlib's
FuncFormatter object.
"""
return '%0.2f'%math.degrees(theta)
class TrigSubplotUnitsCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for trig function subplots allowing their x axis units
to be changed from radians to degrees.
"""
def __init__(self, subplot):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(TrigSubplotUnitsCtrl, self).__init__("Units")
#store the subplot object that this control panel is associated with,
#so that we can access it later
self.subplot = subplot
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(TrigSubplotUnitsCtrl, self).setup(parent)
#create a choice box for the different units for the x axis
#we use a avoplot.gui.widgets.ChoiceSetting object which is a
#thin wrapper around a wx.ChoiceBox, but provides a label and
#automatically registers the event handler.
units_choice = widgets.ChoiceSetting(self, "x-axis units:", "Radians",
["Radians", "Degrees"],
self.on_units_change)
#add the choice widget to the control panel sizer
self.Add(units_choice, 0,wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border=10)
def on_units_change(self, evnt):
"""
Event handler for change of x axis units events.
"""
#get the matplotlib axes object from the subplot
ax = self.subplot.get_mpl_axes()
#change the axis labels and label formatting based on the choice of
#units
if evnt.GetString() == 'Degrees':
ax.set_xlabel(r'$\theta$ (degrees)')
ax.xaxis.set_major_formatter(plt.FuncFormatter(rad2deg))
else:
ax.set_xlabel(r'$\theta$ (radians)')
ax.xaxis.set_major_formatter(plt.ScalarFormatter())
#draw our changes in the display
self.subplot.update()
class SineWaveFreqCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel for sine wave data series allowing their frequency to
be changed using a slider.
"""
def __init__(self, series):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(SineWaveFreqCtrl, self).__init__("Freq.")
#store the data series object that this control panel is associated with,
#so that we can access it later
self.series = series
def setup(self, parent):
"""
This is where all the controls get added to the control panel. You
*must* call the setup method of the parent class before doing any of
your own setup.
"""
#call parent class's setup method - do this before anything else
super(SineWaveFreqCtrl, self).setup(parent)
#create a label for the slider
label = wx.StaticText(self, wx.ID_ANY, 'Frequency')
self.Add(label, 0,
wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL,
border=10)
#create a frequency slider
self.slider = wx.Slider(self, wx.ID_ANY, value=1, minValue=1,
maxValue=30, style=wx.SL_LABELS)
#add the slider to the control panel's sizer
self.Add(self.slider, 0,
wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, border=10)
#register an event handler for slider change events
wx.EVT_COMMAND_SCROLL(self, self.slider.GetId(), self.on_slider_change)
def on_slider_change(self, evnt):
"""
Event handler for frequency slider change events.
"""
#change the frequency of the sine wave data accordingly
f = self.slider.GetValue()
x_data = numpy.linspace(0, 7, 2000)
y_data = numpy.sin(x_data * f)
#change the data in the series object
self.series.set_xy_data(xdata=x_data, ydata=y_data)
#draw our changes on the display
self.series.update()
#register the plugin with AvoPlot
plugins.register(AdvExamplePlugin())
| gpl-3.0 |
arg-hya/taxiCab | Tools/Misc/TaskPointGenerator.py | 1 | 1502 | import json
import shapefile as shp
import matplotlib.pyplot as plt
import random
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
numbersX = []
numbersY = []
TaskPoints = {}
shpFilePath = r"D:\TaxiCab\mycode\Plots\ShapefileAndTrajectory\taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
for shape in sf.shapeRecords():
#print(records[0][3])
x = [i[0] for i in shape.shape.points[:]]
meanX = mean(x)
numbersX.append(meanX)
y = [i[1] for i in shape.shape.points[:]]
meanY = mean(y)
numbersY.append(meanY)
plt.plot(x,y)
num = 0 ##range(263)
for x, y in zip(numbersX, numbersY):
plt.text(x, y, str(num), color="red", fontsize=12)
num = num + 1
plt.plot(numbersX, numbersY, 'o', color='blue', markersize=7, markeredgewidth=0.0)
#print (len(numbersX))
#print (numbersY)
plt.show()
Date_min = 1
Date_max = 30
Beta_min = 2
Beta_max = 30
#print (range(len(numbersX)))
for i in range(len(numbersX)):
date = "2017/1/"
TaskPoints_trace = []
TaskPoints_trace.append(records[i][3])
TaskPoints_trace.append(numbersX[i])
TaskPoints_trace.append(numbersY[i])
TaskPoints_trace.append(random.randint(Beta_min, Beta_max))
date += str(random.randint(Date_min, Date_max))
TaskPoints_trace.append(date)
TaskPoints[i] = TaskPoints_trace
json.dump(TaskPoints, open('Data1/TaxiZone_TaskPoints.json', 'w'), indent=4, sort_keys=True, separators=(',', ':'))
| gpl-3.0 |
elidrc/PSO | test_pso.py | 1 | 1192 | from benchmark_functions import *
from pso import *
import matplotlib.pyplot as plt
iterations = 100
particles = 500
dimensions = 2
search_space = [[-5.12] * dimensions, [5.12] * dimensions]
# print init_pso(iterations, particles, search_space)
velocity, fitness, local_best, local_position, global_best, global_position = init_pso(iterations, particles,
search_space)
# print create_swarm(particles, search_space)
swarm = create_swarm(particles, search_space)
iteration = 0
while iteration < iterations:
fitness = [sphere(solution) for solution in swarm]
local_best, local_position = update_local_position(swarm, fitness, local_best, local_position)
global_best, global_position = update_global_position(swarm, local_best, global_best, global_position, iteration)
swarm, velocity = update_swarm(swarm, velocity, local_position, global_position, iteration)
swarm = check_swarm(swarm, search_space)
iteration += 1
plt.plot(global_best, '.-', label='%f' % min(global_best))
plt.xlim(-1, iteration)
# plt.ylim(min(global_best), max(global_best)+0.01)
plt.legend()
plt.show()
| mit |
rmhyman/DataScience | Lesson3/exploratory_data_analysis_subway_data.py | 1 | 1558 | import numpy as np
import pandas
import matplotlib.pyplot as plt
def entries_histogram(turnstile_weather):
'''
Before we perform any analysis, it might be useful to take a
look at the data we're hoping to analyze. More specifically, let's
examine the hourly entries in our NYC subway data and determine what
distribution the data follows. This data is stored in a dataframe
called turnstile_weather under the ['ENTRIESn_hourly'] column.
Let's plot two histograms on the same axes to show hourly
entries when raining vs. when not raining. Here's an example on how
to plot histograms with pandas and matplotlib:
turnstile_weather['column_to_graph'].hist()
Your histogram may look similar to bar graph in the instructor notes below.
You can read a bit about using matplotlib and pandas to plot histograms here:
http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
You can see the information contained within the turnstile weather data here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
'''
plt.figure()
#print turnstile_weather['rain'] == 1
    turnstile_weather[turnstile_weather['rain'] == 0]['ENTRIESn_hourly'].hist() # histogram of hourly entries when it is not raining
    turnstile_weather[turnstile_weather['rain'] == 1]['ENTRIESn_hourly'].hist() # histogram of hourly entries when it is raining
return plt
| mit |
josiahseaman/DNAResearch | Repeat_Graph.py | 1 | 8201 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
from IPython.core.display import Image
# <codecell>
data = []
for y in range(10):
data.append([y+x for x in range(10)])
# print(data)
Image(data=data)
# <headingcell level=1>
# Matplot Lib
# <codecell>
alpha = 0.7
phi_ext = 2 * pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
return 2 + alpha - 2 * cos(phi_p)*cos(phi_m) - alpha * cos(phi_ext - 2*phi_p)
phi_m = linspace(0, 2*pi, 100)
phi_p = linspace(0, 2*pi, 100)
X,Y = meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*pi), Y/(2*pi), Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <codecell>
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import random
bignum = 10
data = []
for i in range(bignum):
data.append([random.random() for x in range(bignum)])
mat = np.array(data) #random.random((bignum, bignum))
X, Y = np.mgrid[:bignum, :bignum]
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
surf = ax.plot_surface(X,Y,mat)
plt.show()
# <headingcell level=2>
# Most simple Pixel Map
# <codecell>
def basic_func(x, y):
return x+y
X, Y = np.mgrid[:bignum, :bignum]
Z = basic_func(X, Y)
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
# <headingcell level=2>
# Basic Repeat Map
# <codecell>
raster_width = 11
seq = 'TCTCGTGAACCGTTCTTTCCCGCGGACGTGATGGATGTGGGTGCCTTTATTTGCGACGATATGGTCCGTAAATTAGGTCTCGTTGTTTGTACCCTCTCACTTGGCCGCTTCAACTTTTTTCCGATAATGTCTAATGCACCGACGGAATTATTGTACAGAGTAGCAAGCTCAGGTTGCACGGCAGACCTTGCCGCGTCGGGTCTGCGCACCACCCAAATCTGGGCGCGTCTGGGCCTCGCTGCTACACTGGTTAACCATGCTTCAGACTCTGTGACGATGAAATATCCAAACGACGTTGAATAAAAACGACGGGGAGCGGCGGTGATTTTTATCAATCGCGGTGAAGCAGTTATGCTCGACATCTATTAACAACAGGAGAAAGGCGCCACCGCTCCGGTGTATTATACACTGGGCCGTTTGACCGTCTCATCGACGGGCAACATGACCAAACCGCACATGCATTTCTCGGGCCGAATCGCCCGCGCCTACTGGAAAGCCGGCTCTGGCGATTATGCCGATTTTGAAAGTTTTCTTTCATCCAAAGCGTATATTATTCAGTTTCAAATATCGACCTTGCTGAAGAAAATCAAAGTGATCTTTTTATTTAAAGGCAATGATGGCGATGTACTTAATCGTGCGATCGCTTTGCGGCAGGGCCCCCGTTGGAATAGATTTGATATGCAGGAGCTGTATCCGATCTGGCATATTCTGTCCAATTAACAGCGCAATATCAACGCTGCGCTGTCTCTGCTGGTCGGCGAACACGGACTGATTCAGTCTCCTTTGGCAGGTTTCGTACAAGGTACCACGCTGAGCGCCCTGGGCCAACGGGACTTTGCACTGCGTAAGGACGCAGTGGAAGTGGGCTCCCTGAACCCTGAAGCCGGTGAAGACAAACGTACGACCATCATCTTTACCTATGTACTGCAGCAGCAAGGTTACAAATCCGGTAAATGTTGCGGCGAGGATAAATATGACGTTATTCTGAAAGAAGGGATTATCTACTATACCGTAGTTCTGATCATCCGGGGCTTCAAAGATTCAGACAAGGACGAAGATGACGGACTTAAACATGCGCTTGAAGGATTCGAAGGCGAACGTGGCGCTGCTCTGTCGACTGTAGCATCCGCGTCCGCATGGAGGAGTGGTCAACATAACGGCACCACCCCTTCGTCAAAGGTGGCGCAAGAACTCCGCCAGAAACGCTGCAATTCCAATACAAACATCACCTGCCCACACGTAAACCTTGAACTTAACAAGATATATCGGCTCTTCCCGCTCCAAAACTAAAAGATACCGGACGTGATCGCGATCAGAGGCAAATACTTGACTCATAAGCTGTCAACGGTTGATTTACTGGGTTTTTCTCCGCCAACCTGTCTGCGCTTGCATGATTATGAAGCCGTGTCAGATCCGATGAAAGTGGCGAATTTCCATAACCAGATGGGTTTCTTGGTAGGCGATGCCATCTTCGTTCAGGAACTCATCAAACAGACGGTCGCGCTGATCATTAACAAAGTAAAAAACCCTGGTGGCCTGAAACAGCGAGCCTCAGAAAAACCGAACTCTCAGCTAGTTTGAGGTGGGTCTAATCATGAGCCAGCACTGCGCGACCGTGGGTCTCGTATTCTGGGTGAGCGCGTGCGTGACGATATTCTGTATCTTGTTAACATGGGTTTTAAACATTCGTTCTTGGCTGACCGTGTCATCATGATCAAGATTGAAGAAGAGCTGCATTTTCATACCCAGAGCTACGAGGTCACCTCGCTCGGACAGGGGGTCAGTAATTACCTGGTCACAGCCGATGCGAAAGCCCCAAAACGTCGCCAACTGGCATATCATCTTGGTACTGGGTTCTCATCATTCTACGCTGGGGCGGATGATCAGGCGTCGCGCGTGGAAGTCAAACAGATGCAACGGATCCTGATTGCAGCCGCCCTGCCGGGCCTCCGAAAGAAATTGCGCCTGGATGCACACAATGAATTTATTGTCCCAATCATGACCGAGTTCGACCAGACCGGCCCCTTAACCTTAGGCTACGCATCAGAAAAACGCGCGCTCGATAACATCATGGTGAGTCAGGATTCTGTGCTGGGGAATCTCTTTATGAAATTTTTAGGTGTGCTGGTGGTCGGTATCAGCCGGACAGCGATAGCGGACCCAGATAAGTATATGGCTATTCTGCTGGGTGCGGTTTTCGACATGCTGGCGATGAAAATCATTGAAGTCTTAGATGTTACGTCCAACCGCAACTATTTGACCAATCGCCGTACGACGGAAATCGCAGCTGTGGCAGAAACCTGTGAGGACGGAGCGTTTGTGATGCTGCTGACCACGTGGCTGGGCAAGAAGTCGGATTCCCTGAAGTTCCCTAACTTAGTGATTGTCTATTATATAGTTATGGTCGGCGGCCCGTGCACCGGAGAGCAGCAGAAACGTGCTACAGCAGCCATGAGTAGCGAAATTGCGCTCCAGCCGTATTTCCGCTTCCGCCGGATTGAGCACACTGTCCGCGGCCGCGTCTTTTGACTGGAAAAAAGTTTCGGCGAAGACGCCGGCGATAATCTGGTCTCCAACAAAACCAAACGTCGCGGTAAAGGGCCGCAGTTTAAATATGTGGAACTGGCAGAACTGACCTTAATCAAGCTGTCGATTTGAGGCGGTGTAGCTAACATGGGAGGTAATGCACGTCATGGAATGAAAGGCATTCTGGGTCCGCTGCGCGTTGCCTCTTTAGCTTATCAGGCGAAAGGTGTCATCGGTTTATCTATGTTAAAAAACTGGGCTCCGGCCTAACAAAAAAATCTGCTGTCAGTTGCTGTACTGGTCCCGCTGAGCGCGAGCACAGGGAGCGCCCTGGAAATGGTGCGCGGTCTGAAAGAAGGCAACGCAGTCTTGGTGGCGAAGATGGGGATCGCCAAAGGAGCGACAGGTCGCTGGGCGGCTGTGGCAGATGGTAACGTCGCACCTCCGCTTCGCGAGCAATTAAACTTTCAGGCT'
# <codecell>
seq = 'CTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTACTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTAGTTACTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGCCTTGC'
# <codecell>
#seq[1*x_size : 1*x_size + raster_width]
seq[7:15]
# <codecell>
sum([True, False, True,True])
# <codecell>
raster_width = 11
x_size = 75 # frequency range
y_size = int(len(seq) / raster_width) # number of lines: (cut off the end)
raster_width
# <codecell>
def repeat_score(x, y):
start_str = seq[y*raster_width : (y+1)*raster_width]
target_str = seq[y*raster_width + x : (y+1)*raster_width + x]
actual_width = min(len(start_str), len(target_str))
return sum([start_str[i] == target_str[i] for i in range(actual_width)])
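# repeat_score(x, y) counts how many characters of raster line y match the same
# line shifted right by x, so high scores at offset x indicate repeats with period x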
# <codecell>
[[repeat_score(x,y) for x in range(1,x_size-1)] for y in range(y_size-10)]
# <codecell>
X, Y = np.mgrid[:x_size, :y_size]
Z = np.array([[repeat_score(x,y) for x in range(1,x_size+1)] for y in range(y_size)]).T
# <codecell>
fig, ax = plt.subplots()
p = ax.pcolor(X, Y, Z,
cmap=cm.Greys_r,
vmin=0, vmax=raster_width)
cb = fig.colorbar(p, ax=ax)
# <codecell>
x, y = 20, 7
start_str = seq[y*raster_width : (y+1)*raster_width]
target_str = seq[y*raster_width + x : (y+1)*raster_width + x]
print( start_str)
print( target_str)
sum([start_str[i] == target_str[i] for i in range(raster_width)])
# <headingcell level=3>
# Notes
# <markdowncell>
# Most of the trouble I had making this came from being unfamiliar with NumPy arrays and matplotlib. The lines for Z and p = ax.pcolor(X, Y, Z, cmap=cm.Greys_r, vmin=0, vmax=raster_width) are very sensitive. The good and the bad of having a graphing platform is that I get scaled axes for free, but it will often squish the pixels. I prefer square pixels. I need to figure out how to generate a highly non-square graph, since the Repeat Map is usually 25 wide x 200 high.
# <headingcell level=1>
# Finished Product
# <codecell>
from Sequence_Utils import debugSequence, weighted_sequence
# <codecell>
class RepeatMap():
def __init__(self, sequence):
self.seq = sequence
self.raster_width = 11
self.x_size = 25 # frequency range
self.y_size = int(len(self.seq) / self.raster_width) # number of lines: (cut off the end)
def repeat_score(self, x, y):
start_str = self.seq[y*self.raster_width : (y+1)*self.raster_width]
target_str = self.seq[y*self.raster_width + x : (y+1)*self.raster_width + x]
actual_width = min(len(start_str), len(target_str))
return sum([start_str[i] == target_str[i] for i in range(actual_width)])
def render(self):
X, Y = np.mgrid[:self.x_size, :self.y_size]
Z = np.array([[self.repeat_score(x,y) for x in range(1,self.x_size+1)] for y in range(self.y_size)]).T
fig, ax = plt.subplots()
fig.set_size_inches(self.x_size /10, self.y_size /10)
p = ax.pcolor(X, Y, Z,
cmap=cm.Greys_r,
vmin=0, vmax=self.raster_width)
cb = fig.colorbar(p, ax=ax)
plt.gca().invert_yaxis()
# <codecell>
rp = RepeatMap(debugSequence(25, 200, 5))
rp.render()
# <codecell>
| apache-2.0 |
CharlesGulian/Deconv | fits_tools_tesla.py | 1 | 5575 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 21:18:54 2016
@author: charlesgulian
"""
import os
#os.chdir('/Users/annepstein/Work/Deconv')
curr_dir = os.getcwd()
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
#from photutils import aperture_photometry
#from photutils import CircularAperture
def binImage(pixelArray,M=3,N=3):
'''
- Bins pixels along image axes into MxN bins (default MxN = 3x3)
'''
pixels = pixelArray
imgDim1,imgDim2 = np.shape(pixels)
xBinSize,yBinSize = float(imgDim1)/float(M),float(imgDim2)/float(N)
imgBinDict = {} # Dictionary for storing
#print xBinSize,yBinSize
for i in range(M):
for j in range(N):
imgBinDict[i+1,j+1] = pixels[int(np.ceil(i*xBinSize)):int(np.floor((i+1)*xBinSize)),\
int(np.ceil(j*yBinSize)):int(np.floor((j+1)*yBinSize))]
#print ''
#print 'Bin: ',i,j
#print 'Shape: ',np.shape(imgBinDict[i,j])
return imgBinDict
def computeObjectFlux(x0,y0,radius,image):
'''
- Compute total flux within circular aperture of the given radius
from source at image coordinates (x0,y0)
'''
position = (x0,y0)
aperture = CircularAperture(position,r=radius)
return aperture_photometry(image,aperture)[0][0]
# getPixels() can be replaced by fits.getdata() (I did not know this)
def getPixels(image_file,delete=False):
hdulist = fits.open(image_file)
data = hdulist[0].data
hdulist.close()
if delete:
del hdulist[0].data
return data
def applyMask(image,mask):
'''
- Apply a binary mask to an array
'''
masked_image = np.multiply(image,mask)
return masked_image
def maskImage(image_file,mask_file,masked_image_file=None,Return=False):
'''
- Takes a .fits image file and .fits binary mask file as input
- Applies binary mask to .fits image data
- Rewrites masked image to new .fits file (masked_image_file)
'''
image = fits.getdata(image_file)
mask = fits.getdata(mask_file)
masked_image = applyMask(image,mask)
inds = np.where(masked_image == 0.0)
masked_image[inds] += 1e-12 # Prevent NaNs
if masked_image_file == None:
masked_image_file = image_file.replace('.fits','_masked.fits').replace('Good','MaskedImages').replace('Bad','MaskedImages')
fits.writeto(masked_image_file,masked_image,fits.getheader(image_file),clobber=True)
if Return:
return masked_image
def shift_image(image,x_offset,y_offset):
# Shifts image pixels from (x,y) to (x-x_offset),(y-y_offset)
dims = np.shape(image) # Image dimensions
dim1,dim2 = dims[0],dims[1]
blank = np.zeros(dims) + 1e-8 # Define blank array to receive new image data
shifted_image = blank
    dy,dx = x_offset,y_offset # Intentionally swapped: numpy arrays are indexed [row, col], i.e. [y, x]
for i in range(dim1):
for j in range(dim2):
if (i+dx < dim1) and (i+dx >= 0) and (j+dy < dim2) and (j+dy >= 0):
                shifted_image[i,j] = image[i+dx,j+dy] # Pull from the offset position, so content at (x,y) ends up at (x-x_offset, y-y_offset)
return shifted_image
def subtractBias(image_file,new_image_file=None,bias=0.0,Return=False):
'''
- Takes a .fits image file as input
- Subtracts median from image data, writes new data to new image file (new_image_file)
'''
if new_image_file == None:
new_image_file = image_file
image = fits.getdata(image_file)
image -= bias
fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
if Return:
return image
def subtractMedian(image_file,new_image_file=None,Return=False):
'''
- Takes a .fits image file as input
- Subtracts median from image data, writes new data to new image file (new_image_file)
'''
if new_image_file == None:
new_image_file = image_file
image = fits.getdata(image_file)
image -= np.median(image)
fits.writeto(new_image_file,image,fits.getheader(image_file),clobber=True)
if Return:
return image
def write_pixel_offset(x_offset,y_offset,image_file,new_image_file=None):
# Add (x,y) pixel offset to .FITS header of an image
header = fits.getheader(image_file) # Get original .FITS header
header['x_offset'] = x_offset # Add new keywords and values to header
header['y_offset'] = y_offset
# If no new image file specified, default writes new header to original image header
if new_image_file == None:
new_image_file = image_file
# Write header to new image
fits.writeto(new_image_file,fits.getdata(image_file),header,clobber=True)
'''
# Testing:
test_image_file = 'AstroImages/Good/fpC-6484-x4078-y134_stitched_alignCropped.fits'
test_image = fits.getdata(test_image_file)
catalog = 'Results/fpC-6484-x4078-y134_stitched_alignCropped_fpC-6484-x4078-y134_stitched_alignCropped_compare.cat'
import sex_stats
fig = sex_stats.data(catalog)
x = fig.get_data('X_IMAGE')
y = fig.get_data('Y_IMAGE')
xlow = np.where(x > 651.0)
xhigh = np.where(x < 658.9)
xin = np.intersect1d(xlow,xhigh)
ylow = np.where(y > 820.0)
yhigh = np.where(y < 826.0)
yin = np.intersect1d(ylow,yhigh)
obj = np.intersect1d(xin,yin)
DATA = fig.Data
x,y = 848.39102,727.23274
radius = 10.
flux = computeObjectFlux(x,y,radius,test_image)
print flux
#testMask = 'AstroImages/Masks/fpC-6484-x4078-y134_stitched_alignCropped_mask.fits'
#maskImage(testImage,testMask)
'''
| gpl-3.0 |
gdl-civestav-localization/cinvestav_location_fingerprinting | experimentation/__init__.py | 1 | 1691 | import os
import cPickle
import matplotlib.pyplot as plt
from datasets import DatasetManager
def plot_cost(results, data_name, plot_label):
plt.figure(plot_label)
plt.ylabel('Accuracy (m)', fontsize=30)
plt.xlabel('Epoch', fontsize=30)
plt.yscale('symlog')
plt.tick_params(axis='both', which='major', labelsize=20)
plt.grid(True)
for i in range(1, 2, 1):
y, x = zip(*results[i][data_name])
name = results[i]['Name']
plt.plot(x, y, label=name, linewidth=5.0)
plt.legend(fontsize='xx-large')
def get_metrics(test_set_y, predicted_values, model_name):
for i in xrange(len(predicted_values)):
print predicted_values[i][1]
if __name__ == '__main__':
"""
seed = 50
with open(os.path.join('experimentation', 'cinvestav_testbed_experiment_results_' + str(seed)), 'rb') as f:
results = cPickle.load(f)
plot_cost(
results=results,
data_name='cost_train',
plot_label='Cost on train phase')
plot_cost(
results=results,
data_name='cost_valid',
plot_label='Cost on valid phase')
plot_cost(
results=results,
data_name='cost_test',
plot_label='Cost on test phase')
plt.show()
"""
seed = 50
dataset, result = DatasetManager.read_dataset2('test_cleaned_dataset.csv', shared=True, seed=seed)
with open(os.path.join('trained_models', 'Logistic Regressionbrandeis_university.save'), 'rb') as f:
model = cPickle.load(f)
predicted_values = model.predict(dataset)
get_metrics(
test_set_y=result,
predicted_values=predicted_values,
model_name='Logistic Regression'
)
| gpl-3.0 |
Newsrecommender/newsrecommender | ArticleRecommendationProject/Recommendation/Collab_Content_Based.py | 1 | 5856 | import yaml
import pandas as pd
import numpy as np
import sys
import os
from math import sqrt
import matplotlib
import matplotlib.pyplot as plot
import networkx as nx
def get_script_directory():
"""
    This function returns the directory of the script when run in script mode.
    In interactive mode it returns the interpreter name.
"""
path = os.path.realpath(sys.argv[0])
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
def similarity_score(Article1,Article2):
"""
    Calculates a similarity score between two articles: 1/(1 + Euclidean distance) over their commonly rated items
"""
both_viewed = {}
for item in dataset[Article1]:
if item in dataset[Article2]:
both_viewed[item] = 1
    # If they have no commonly rated items, the similarity is 0
if len(both_viewed) == 0:
return 0
# Finding Euclidean distance
sum_of_euclidean_distance = []
for item in dataset[Article1]:
if item in dataset[Article2]:
sum_of_euclidean_distance.append(pow(dataset[Article1][item] - dataset[Article2][item], 2))
sum_of_euclidean_distance = sum(sum_of_euclidean_distance)
#print (sum_of_euclidean_distance)
return 1/(1+sqrt(sum_of_euclidean_distance))
def pearson_correlation(Article1,Article2):
"""
This function calculates Pearson correlation between two vectors
"""
both_rated = {}
for item in dataset[Article1]:
if item in dataset[Article2]:
both_rated[item] = 1
number_of_ratings = len(both_rated)
# Checking for number of ratings in common
if number_of_ratings == 0:
return 0
# Add up all the preferences of each user
person1_preferences_sum = sum([dataset[Article1][item] for item in both_rated])
person2_preferences_sum = sum([dataset[Article2][item] for item in both_rated])
# Sum up the squares of preferences of each user
person1_square_preferences_sum = sum([pow(dataset[Article1][item],2) for item in both_rated])
person2_square_preferences_sum = sum([pow(dataset[Article2][item],2) for item in both_rated])
# Sum up the product value of both preferences for each item
product_sum_of_both_users = sum([dataset[Article1][item] * dataset[Article2][item] for item in both_rated])
# Calculate the pearson score
numerator_value = product_sum_of_both_users - (person1_preferences_sum*person2_preferences_sum/number_of_ratings)
denominator_value = sqrt((person1_square_preferences_sum - pow(person1_preferences_sum,2)/number_of_ratings) * (person2_square_preferences_sum -pow(person2_preferences_sum,2)/number_of_ratings))
if denominator_value == 0:
return 0
else:
r = numerator_value/denominator_value
return r
def find_most_similar_objects(Article1,number_of_users):
    # Ranks the other articles by Pearson correlation and returns the single most similar one.
scores = [(pearson_correlation(Article1,other_person),other_person) for other_person in dataset if other_person != Article1 ]
    # Sort so that the highest-scoring articles appear first
scores.sort()
scores.reverse()
return (scores[0:number_of_users][0][1])
def get_recommendations(objects, no_of_recommendations):
"""
This function generates recommendations for specified object
"""
recommended_articles = []
input_articles = []
for article in objects:
# print (article, find_most_similar_objects(article,2)[0][1], find_most_similar_objects(article,2)[1][1])
input_articles.append(article)
recommended_articles.append(find_most_similar_objects(article,no_of_recommendations))
return input_articles,recommended_articles
# Find the path of script
path = get_script_directory()
print ('Script is located at {}'.format(path))
os.chdir(path)
# import config files
print("Reading configuration")
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
user_ratings_files_path = cfg['project_test_conf']['ratings_file_path']
user_ratings_csv_filename = cfg['project_test_conf']['ratings_file_name']
articles_files_path = cfg['project_test_conf']['articles_file_path']
articles_csv_filename = cfg['project_test_conf']['articles_file_name']
ratings_index = cfg['project_test_conf']['ratings_index_column']
output_file_path = cfg['project_test_conf']['output_path']
output_file_name = cfg['project_test_conf']['output_file_name']
ratings_file = os.path.join(user_ratings_files_path, user_ratings_csv_filename)
articles_file = os.path.join(articles_files_path, articles_csv_filename)
Output_Recommendations = os.path.join(output_file_path, output_file_name)
print("Configuration loaded successfully")
print ('Reading ratings from file {}'.format(ratings_file))
user_ratings = pd.read_csv(ratings_file, index_col=ratings_index)
articles_db = pd.read_csv(articles_file, index_col=ratings_index)
objects_list = list(user_ratings.index)
user_ratings_T = user_ratings.transpose()
dataset = user_ratings_T.to_dict()
# Get recommendations
print('Calculations in progress...')
Article, recommended_article = get_recommendations(objects_list, 5)
print('Calculations completed.')
# Create output files
print('Creating output file')
recommended_article_title = []
for content in recommended_article:
recommended_article_title.append(articles_db.Title[content])
input_article_title = []
for content in Article:
input_article_title.append(articles_db.Title[content])
df = pd.DataFrame()
df['Article'] = Article
df['Recommendation'] = recommended_article
df['News'] = input_article_title
df['Recommended_News'] = recommended_article_title
df = df.set_index('Article', drop=True, append=False, inplace=False, verify_integrity=False)
df.to_csv(Output_Recommendations)
print('Output file created.')
print('Check output files at {}'.format(Output_Recommendations))
| mit |
mrcslws/htmresearch | projects/feedback/feedback_sequences_additional.py | 7 | 24229 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file runs a number of experiments testing the effectiveness of feedback
with noisy inputs.
"""
import os
from copy import deepcopy
import numpy
import cPickle
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats
matplotlib.rcParams['pdf.fonttype'] = 42
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
import feedback_experiment
from feedback_experiment import FeedbackExperiment
def convertSequenceMachineSequence(generatedSequences):
"""
Convert a sequence from the SequenceMachine into a list of sequences, such
that each sequence is a list of set of SDRs.
"""
sequenceList = []
currentSequence = []
for s in generatedSequences:
if s is None:
sequenceList.append(currentSequence)
currentSequence = []
else:
currentSequence.append(s)
return sequenceList
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2,
sharedRange=None, seed=42):
"""
Generate high order sequences using SequenceMachine
"""
# Lots of room for noise sdrs
patternAlphabetSize = 10*(sequenceLength * sequenceCount)
patternMachine = PatternMachine(n, w, patternAlphabetSize, seed)
sequenceMachine = SequenceMachine(patternMachine, seed)
numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength,
sharedRange=sharedRange )
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
return sequenceMachine, generatedSequences, numbers
def sparsenRange(sequenceMachine, sequences, startRange, endRange, probaZero):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p < endRange and p >= startRange:
newsdr = numpy.array(list(sdr))
keep = numpy.random.rand(len(newsdr)) > probaZero
newsdr = newsdr[keep==True]
newSequence.append(set(newsdr))
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def crossSequences(sequenceMachine, sequences, pos):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos:
newSequence.append(sequences[(numseq +1) % len(sequences)][p])
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addTemporalNoise(sequenceMachine, sequences, noiseStart, noiseEnd, noiseProba):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= noiseStart and p < noiseEnd:
newsdr = patternMachine.addNoise(sdr, noiseProba)
newSequence.append(newsdr)
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def addPerturbation(sequenceMachine, sequences, noiseType, pos, number=1):
"""
"""
patternMachine = sequenceMachine.patternMachine
newSequences = []
for (numseq, s) in enumerate(sequences):
newSequence = []
for p,sdr in enumerate(s):
if p >= pos and p < pos+number:
if noiseType == "skip":
pass
elif noiseType == "replace":
newsdr = patternMachine.addNoise(sdr, 1.0)
newSequence.append(newsdr)
elif noiseType == "repeat":
newSequence.append(s[p-1])
else:
raise("Unrecognized Noise Type!")
else:
newSequence.append(sdr)
newSequences.append(newSequence)
return newSequences
def runInference(exp, sequences, enableFeedback=True, apicalTiebreak=True,
apicalModulationBasalThreshold=True, inertia=True):
"""
Run inference on this set of sequences and compute error
"""
if enableFeedback:
print "Feedback enabled: "
else:
print "Feedback disabled: "
error = 0
activityTraces = []
responses = []
for i,sequence in enumerate(sequences):
(avgActiveCells, avgPredictedActiveCells, activityTrace, responsesThisSeq) = exp.infer(
sequence, sequenceNumber=i, enableFeedback=enableFeedback, apicalTiebreak=apicalTiebreak,
apicalModulationBasalThreshold=apicalModulationBasalThreshold, inertia=inertia)
error += avgActiveCells
activityTraces.append(activityTrace)
responses.append(responsesThisSeq)
print " "
error /= len(sequences)
print "Average error = ",error
return error, activityTraces, responses
def runExp(noiseProba, numSequences, nbSeeds, noiseType, sequenceLen, sharedRange, noiseRange, whichPlot, plotTitle):
allowedNoises = ("skip", "replace", "repeat", "crossover", "pollute")
if noiseType not in allowedNoises:
    raise(RuntimeError("noiseType must be one of the following: " + ", ".join(allowedNoises)))
meanErrsFB = []; meanErrsNoFB = []; meanErrsNoNoise = []
stdErrsFB = []; stdErrsNoFB = []; stdErrsNoNoise = []
meanPerfsFB = []; stdPerfsFB = []
meanPerfsNoFB = []; stdPerfsNoFB = []
stdsFB = []
stdsNoFB=[]
activitiesFB=[]; activitiesNoFB=[]
diffsFB = []
diffsNoFB = []
overlapsFBL2=[]; overlapsNoFBL2=[]
overlapsFBL2Next=[]; overlapsNoFBL2Next=[]
overlapsFBL4=[]; overlapsNoFBL4=[]
overlapsFBL4Next=[]; overlapsNoFBL4Next=[]
corrsPredCorrectFBL4=[]; corrsPredCorrectNoFBL4=[]
diffsFBL4Pred=[]; diffsNoFBL4Pred=[]
diffsFBL4PredNext=[]; diffsNoFBL4PredNext=[]
diffsFBL2=[]; diffsNoFBL2=[]
diffsFBL2Next=[]; diffsNoFBL2Next=[]
diffsNoAT = []; overlapsNoATL2=[]; overlapsNoATL2Next=[]; overlapsNoATL4=[]
overlapsNoATL4Next=[]
corrsPredCorrectNoATL4=[]; diffsNoATL4Pred=[]; diffsNoATL4PredNext=[]
diffsNoATL2=[]; diffsNoATL2Next=[]
diffsNoAM = []; overlapsNoAML2=[]; overlapsNoAML2Next=[]; overlapsNoAML4=[]
overlapsNoAML4Next=[]
corrsPredCorrectNoAML4=[]; diffsNoAML4Pred=[]; diffsNoAML4PredNext=[]
diffsNoAML2=[]; diffsNoAML2Next=[]
diffsNoIN = []; overlapsNoINL2=[]; overlapsNoINL2Next=[]; overlapsNoINL4=[]
overlapsNoINL4Next=[]
corrsPredCorrectNoINL4=[]; diffsNoINL4Pred=[]; diffsNoINL4PredNext=[]
diffsNoINL2=[]; diffsNoINL2Next=[]
errorsFB=[]; errorsNoFB=[]; errorsNoNoise=[]
perfsFB = []; perfsNoFB = []
#for probaZero in probaZeros:
seed = 42
for seedx in range(nbSeeds):
seed = seedx + 123
    profile = False
L4Overrides = {"cellsPerColumn": 8}
numpy.random.seed(seed)
# Create the sequences and arrays
print "Generating sequences..."
sequenceMachine, generatedSequences, numbers = generateSequences(
sequenceLength=sequenceLen, sequenceCount=numSequences,
sharedRange=sharedRange,
seed=seed)
sequences = convertSequenceMachineSequence(generatedSequences)
noisySequences = deepcopy(sequences)
# Apply noise to sequences
noisySequences = addTemporalNoise(sequenceMachine, noisySequences,
noiseStart=noiseRange[0], noiseEnd=noiseRange[1],
noiseProba=noiseProba)
# *In addition* to this, add crossover or single-point noise
if noiseType == "crossover":
noisySequences = crossSequences(sequenceMachine, noisySequences,
pos=sequenceLen/2)
elif noiseType in ("repeat", "replace", "skip"):
noisySequences = addPerturbation(sequenceMachine, noisySequences,
noiseType=noiseType, pos=sequenceLen/2, number=1)
inferenceErrors = []
#Setup experiment and train the network on sequences
print "Learning sequences..."
exp = FeedbackExperiment(
numLearningPasses= 2*sequenceLen, # To handle high order sequences
seed=seed,
L4Overrides=L4Overrides,
)
exp.learnSequences(sequences)
print "Number of columns in exp: ", exp.numColumns
print "Sequences learned!"
# Run inference without any noise. This becomes our baseline error
standardError, activityNoNoise, responsesNoNoise = runInference(exp, sequences)
inferenceErrors.append(standardError)
runError, activityFB, responsesFB = runInference(
exp, noisySequences, enableFeedback=True)
runError, activityNoFB, responsesNoFB = runInference(
exp, noisySequences, enableFeedback=False)
runError, activityNoAT, responsesNoAT = runInference(
exp, noisySequences, enableFeedback=True, apicalTiebreak=False)
    runError, activityNoAM, responsesNoAM = runInference(
exp, noisySequences, enableFeedback=True, apicalModulationBasalThreshold=False)
runError, activityNoIN, responsesNoIN = runInference(
exp, noisySequences, enableFeedback=True, inertia=False)
# Now that actual processing is done, we compute various statistics and plot graphs.
seqlen = len(noisySequences[0])
sdrlen = 2048 * 8 # Should be the total number of cells in L4. Need to make this more parametrized!
for numseq in range(len(responsesNoNoise)):
diffsFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAT.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoATL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoATL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoAM.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoAML4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoAML4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
diffsNoIN.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
overlapsNoINL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
diffsNoINL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
cpcfb = []; cpcnofb=[]; cpcnoat=[]; cpcnoam=[]; cpcnoin=[];
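      # The binary vectors get one extra always-on element (index sdrlen), presumably
      # so numpy.corrcoef stays defined (non-zero variance) even when a response or
      # prediction set is empty.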
for x in range(seqlen):
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcfb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnofb.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAT[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoat.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAM[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoam.append(numpy.corrcoef(z1, z2)[0,1])
z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoIN[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
cpcnoin.append(numpy.corrcoef(z1, z2)[0,1])
# Note that the correlations are appended across all seeds and sequences
corrsPredCorrectNoFBL4.append(cpcnofb[1:])
corrsPredCorrectNoATL4.append(cpcnoat[1:])
corrsPredCorrectNoINL4.append(cpcnoin[1:])
corrsPredCorrectNoAML4.append(cpcnoam[1:])
corrsPredCorrectFBL4.append(cpcfb[1:])
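      # The loop above turns each set-valued SDR into a dense 0/1 vector before
      # calling numpy.corrcoef. A minimal sketch of that conversion (hypothetical
      # helper, not part of the original experiment code):
      #   def sdrCorr(sdrA, sdrB, n):
      #     zA = numpy.zeros(n + 1); zA[list(sdrA)] = 1; zA[-1] = 1
      #     zB = numpy.zeros(n + 1); zB[list(sdrB)] = 1; zB[-1] = 1
      #     return numpy.corrcoef(zA, zB)[0, 1]
      # The trailing always-on bit keeps both vectors' variance non-zero, so the
      # correlation stays defined even when a prediction is empty.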
# diffsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
# diffsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
print "Size of L2 responses (FB):", [len(responsesFB[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L2 responses (NoNoise):", [len(responsesNoNoise[numseq]['L2Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (FB):", [len(responsesFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoFB):", [len(responsesNoFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAT):", [len(responsesNoAT[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoAM):", [len(responsesNoAM[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoIN):", [len(responsesNoIN[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 responses (NoNoise):", [len(responsesNoNoise[numseq]['L4Responses'][x]) for x in range(seqlen)]
print "Size of L4 predictions (FB):", [len(responsesFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoFB):", [len(responsesNoFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAT):", [len(responsesNoAT[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoAM):", [len(responsesNoAM[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoIN):", [len(responsesNoIN[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "Size of L4 predictions (NoNoise):", [len(responsesNoNoise[numseq]['L4Predicted'][x]) for x in range(seqlen)]
print "L2 overlap with current (FB): ", overlapsFBL2[-1]
print "L4 overlap with current (FB): ", overlapsFBL4[-1]
print "L4 overlap with current (NoFB): ", overlapsNoFBL4[-1]
print "L4 correlation pred/correct (FB): ", corrsPredCorrectFBL4[-1]
print "L4 correlation pred/correct (NoFB): ", corrsPredCorrectNoFBL4[-1]
print "L4 correlation pred/correct (NoAT): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoAM): ", corrsPredCorrectNoATL4[-1]
print "L4 correlation pred/correct (NoIN): ", corrsPredCorrectNoATL4[-1]
print "NoNoise sequence:", [list(x)[:2] for x in sequences[numseq]]
print "Noise sequence:", [list(x)[:2] for x in noisySequences[numseq]]
print "NoNoise L4 responses:", [list(x)[:2] for x in responsesNoNoise[numseq]['L4Responses']]
print "NoFB L4 responses:", [list(x)[:2] for x in responsesNoFB[numseq]['L4Responses']]
print ""
plt.figure()
allDataSets = (corrsPredCorrectFBL4, corrsPredCorrectNoFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
allmeans = [numpy.mean(x) for x in allDataSets]
allstds = [numpy.std(x) for x in allDataSets]
nbbars = len(allmeans)
plt.bar(2*(1+numpy.arange(nbbars))-.5, allmeans, 1.0, color='r', edgecolor='none', yerr=allstds, capsize=5, ecolor='k')
for nn in range(1, nbbars):
plt.vlines([2, 2 +2*nn], 1.2, 1.2+(nn/10.0), lw=2); plt.hlines(1.2+(nn/10.0), 2, 2+2*nn, lw=2)
pval = scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(allDataSets[nn]).ravel())[1]
if pval > 0.05:
pvallabel = ' o' #r'$o$'
elif pval > 0.01:
pvallabel = '*'
elif pval > 0.001:
pvallabel = '**'
else:
pvallabel = '***'
plt.text(3, 1.2+(nn/10.0)+.02, pvallabel, fontdict={"size":14})
  plt.xticks(2*(1+numpy.arange(nbbars)), ('Full', 'No\nFB', 'No Earlier\nFiring', 'No Threshold\nModulation', 'No Slower\nDynamics'))
plt.ylabel("Avg. Prediction Performance");
plt.title(plotTitle)
plt.savefig(plotTitle+".png")
# scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(corrsPredCorrectNoATL4).ravel())
plt.show()
return (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4, corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
if __name__ == "__main__":
plt.ion()
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, no shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, shared range")
(corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, no shared range")
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, shared range")
#
# (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
# corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
# numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, no shared range")
| agpl-3.0 |
tracierenea/gnuradio | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
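        # firdes.low_pass_2(gain, sample_rate, cutoff, transition_width, stopband_atten_dB)
        # returns the FIR taps; a narrower transition band or a higher stopband
        # attenuation yields more taps, and therefore a more expensive filter.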
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
niltonlk/nest-simulator | pynest/examples/spatial/test_3d.py | 14 | 2140 | # -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
A spatial network in 3D
-------------------------
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
nest.ResetKernel()
pos = nest.spatial.free(nest.random.uniform(-0.5, 0.5), extent=[1.5, 1.5, 1.5])
l1 = nest.Create('iaf_psc_alpha', 1000, positions=pos)
# visualize
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*nest.GetPosition(l1))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b')
# full connections in box volume [-0.2,0.2]**3
nest.Connect(l1, l1,
{'rule': 'pairwise_bernoulli',
'p': 1.,
'allow_autapses': False,
'mask': {'box': {'lower_left': [-0.2, -0.2, -0.2],
'upper_right': [0.2, 0.2, 0.2]}}})
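# With p=1 inside the box mask, each neuron is connected to every neuron whose
# displacement from it falls within the 0.4 x 0.4 x 0.4 cube;
# allow_autapses=False only excludes self-connections.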
# show connections from center element
# sender shown in red, targets in green
ctr = nest.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*nest.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = nest.GetPosition(ctr)
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = nest.GetTargetNodes(ctr, l1)[0]
distances = nest.Distance(ctr, l1)
tgt_distances = [d for i, d in enumerate(distances) if i + 1 in tgts]
plt.figure()
plt.hist(tgt_distances, 25)
plt.show()
| gpl-2.0 |
iABC2XYZ/abc | DM_Twiss/TwissTrain3.py | 2 | 4285 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
Check that the Distribution generation method is right.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from Orth import LambdaR,OrthTrans
from TFOrth import TFLambdaR,TFOrthTrans
plt.close('all')
emitX=4.8
alphaX=-2.3
betaX=15.3
gammaX=(1.+alphaX**2)/betaX
diagRX=LambdaR(emitX,alphaX,betaX,gammaX)
PX=OrthTrans(emitX,alphaX,betaX,gammaX)
numPart=np.int32(1e5)
Z=np.random.randn(2,numPart)
X=np.matmul(np.matmul(PX,np.linalg.inv(diagRX)),Z)
plt.figure(1)
plt.plot(X[0,:],X[1,:],'r.')
plt.axis('equal')
##
def WeightP(shape):
initial=tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def WeightLambda2D():
lambda1=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
lambda2=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
O=tf.reshape(tf.constant(0,tf.float32),[1,1])
LambdaR1=tf.concat([lambda1,O],0)
LambdaR2=tf.concat([O,lambda2],0)
LambdaR=tf.concat([LambdaR1,LambdaR2],1)
return LambdaR
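# WeightLambda2D above assembles diag(lambda1, lambda2): each learnable scalar is
# stacked with a fixed zero into a column, and the two columns are concatenated
# into a trainable 2x2 diagonal matrix.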
P_1=WeightP([2,2])
LambdaR=WeightLambda2D()
xI=tf.placeholder(tf.float32,[2,None])
xL1=tf.matmul(P_1,xI)
xO=tf.matmul(LambdaR,xL1)
xR=xO[0]**2+xO[1]**2
lossXR=(xR-2.)**2
rateLearn=5e-4
optXR=tf.train.AdamOptimizer(rateLearn)
trainXR=optXR.minimize(lossXR)
meanLossXR=tf.reduce_mean(lossXR)
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess.run(tf.global_variables_initializer())
sizeBatch=64
for _ in xrange(30000):
startBatch=np.random.randint(0,high=numPart-sizeBatch-1)
xFeed=X[:,startBatch:startBatch+sizeBatch:]
sess.run(trainXR,feed_dict={xI:xFeed})
#print(sess.run(LambdaR))
#print('---------------------------')
print(sess.run(meanLossXR,feed_dict={xI:X}))
print('_______________________________________________')
'''
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.clf()
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.pause(0.001)
'''
LambdaRGet=sess.run(LambdaR)
print(LambdaRGet)
print('---------------------------')
print(1./(LambdaRGet[0,0]*LambdaRGet[1,1]))
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
'''
print(sess.run(P_1))
print(sess.run(LambdaR))
print(sess.run(xR,feed_dict={xI:X}))
'''
'''
wEmit=tf.Variable([emitX])
wAlpha=tf.Variable([alphaX])
wBeta=tf.Variable([betaX])
wGamma=tf.Variable([gammaX])
'''
'''
wEmit=tf.Variable([13.])
wAlpha=tf.Variable([1.3])
wBeta=tf.Variable([0.5])
#wGamma=tf.Variable([0.5])
wGamma=(1.+wAlpha**2)/wBeta
xH=tf.placeholder(tf.float32,[2,None])
diagR,diagRT=TFLambdaR(wEmit,wAlpha,wBeta,wGamma)
P,PI=TFOrthTrans(wEmit,wAlpha,wBeta,wGamma)
zH=tf.matmul(tf.matmul(diagR,PI),xH)
R=zH[0]**2+zH[1]**2
#lossR=tf.abs(R-2.e-6)
lossR=R
optR=tf.train.GradientDescentOptimizer(0.01)
trainR=optR.minimize(lossR)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
#sess.run(diagR)
print(sess.run(R,feed_dict={xH:X}))
numIter=10
recEmit=np.zeros(numIter)
recAlpha=np.zeros(numIter)
recBeta=np.zeros(numIter)
recGamma=np.zeros(numIter)
recLoss=np.zeros(numIter)
for _ in xrange(numIter):
sess.run(trainR,feed_dict={xH:X})
recEmit[_]=sess.run(wEmit)
recAlpha[_]=sess.run(wAlpha)
recBeta[_]=sess.run(wBeta)
recGamma[_]=sess.run(wGamma)
recLoss[_]=sess.run(tf.reduce_mean(lossR))
print(recEmit)
print(recAlpha)
#print(sess.run(R,feed_dict={xH:X}))
plt.figure('emit')
plt.plot(recEmit)
plt.figure('alpha')
plt.plot(recAlpha)
plt.figure('beta')
plt.plot(recBeta)
plt.figure('gamma')
plt.plot(recGamma)
plt.figure('Loss')
plt.plot(recLoss)
'''
'''
zGet=sess.run(zH,feed_dict={xH:X})
print(sess.run(lossR,feed_dict={xH:X}))
'''
'''
plt.figure('Check')
plt.hold('on')
plt.plot(Z[0,:],Z[1,:],'bo')
plt.plot(zGet[0,:],zGet[1,:],'r.')
plt.axis('equal')
'''
'''
print(sess.run(wEmit))
print(sess.run(wAlpha))
print(sess.run(wBeta))
print(sess.run(wGamma))
print(sess.run(diagR))
print(sess.run(diagRT))
'''
#print(PX)
#print(sess.run(P))
#print(sess.run(zH,feed_dict={xH:X}))
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_stats_cluster_methods.py | 6 | 8607 | # doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
For more information, see:
Ridgway et al. 2012, "The problem of low variance voxels in
statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
In the top row, we plot the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth, the
two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions:
X = X.reshape((n_subjects, 1, n_src))
###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
# Not specifying a connectivity matrix implies grid-like connectivity,
# which we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
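# A rough sketch of the "hat" adjustment (Ridgway et al. 2012): the variance
# estimate is regularized, roughly t = mean(X) / sqrt((var(X) + sigma * max(var(X))) / n),
# which suppresses the spuriously large T values of near-zero-variance pixels.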
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
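# TFCE sweeps the cluster-forming threshold from `start` upward in increments of
# `step`, integrating cluster extent and height across levels, so each pixel gets
# its own FWER-corrected p-value instead of a single all-or-nothing cluster call.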
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
| bsd-3-clause |
fw1121/Roary | contrib/roary_plots/roary_plots.py | 1 | 5754 | #!/usr/bin/env python
# Copyright (C) <2015> EMBL-European Bioinformatics Institute
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Neither the institution name nor the name roary_plots
# can be used to endorse or promote products derived from
# this software without prior written permission.
# For written permission, please contact <marco@ebi.ac.uk>.
# Products derived from this software may not be called roary_plots
# nor may roary_plots appear in their names without prior written
# permission of the developers. You should have received a copy
# of the GNU General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
__author__ = "Marco Galardini"
__version__ = '0.1.0'
def get_options():
import argparse
# create the top-level parser
description = "Create plots from roary outputs"
parser = argparse.ArgumentParser(description = description,
prog = 'roary_plots.py')
parser.add_argument('tree', action='store',
help='Newick Tree file', default='accessory_binary_genes.fa.newick')
parser.add_argument('spreadsheet', action='store',
help='Roary gene presence/absence spreadsheet', default='gene_presence_absence.csv')
parser.add_argument('--version', action='version',
version='%(prog)s '+__version__)
return parser.parse_args()
if __name__ == "__main__":
options = get_options()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
import os
import pandas as pd
import numpy as np
from Bio import Phylo
t = Phylo.read(options.tree, 'newick')
# Max distance to create better plots
mdist = max([t.distance(t.root, x) for x in t.get_terminals()])
# Load roary
roary = pd.read_table(options.spreadsheet,
sep=',',
low_memory=False)
# Set index (group name)
roary.set_index('Gene', inplace=True)
# Drop the other info columns
roary.drop(list(roary.columns[:10]), axis=1, inplace=True)
# Transform it in a presence/absence matrix (1/0)
roary.replace('.{2,100}', 1, regex=True, inplace=True)
roary.replace(np.nan, 0, regex=True, inplace=True)
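    # After the two replacements above, every cell is 1 (a locus tag of 2-100
    # characters was recorded for that strain) or 0 (gene absent), i.e. a
    # gene-by-strain presence/absence matrix.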
# Sort the matrix by the sum of strains presence
idx = roary.sum(axis=1).order(ascending=False).index
roary_sorted = roary.ix[idx]
# Pangenome frequency plot
plt.figure(figsize=(7, 5))
plt.hist(roary.sum(axis=1), roary.shape[1],
histtype="stepfilled", alpha=.7)
plt.xlabel('Number of genomes')
plt.ylabel('Number of genes')
sns.despine(left=True,
bottom=True)
plt.savefig('pangenome_frequency.png')
plt.clf()
# Sort the matrix according to tip labels in the tree
roary_sorted = roary_sorted[[x.name for x in t.get_terminals()]]
# Plot presence/absence matrix against the tree
with sns.axes_style('whitegrid'):
fig = plt.figure(figsize=(17, 10))
ax1=plt.subplot2grid((1,40), (0, 10), colspan=30)
a=ax1.matshow(roary_sorted.T, cmap=plt.cm.Blues,
vmin=0, vmax=1,
aspect='auto',
interpolation='none',
)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.axis('off')
ax = fig.add_subplot(1,2,1)
ax=plt.subplot2grid((1,40), (0, 0), colspan=10, axisbg='white')
fig.subplots_adjust(wspace=0, hspace=0)
ax1.set_title('Roary matrix\n(%d gene clusters)'%roary.shape[0])
Phylo.draw(t, axes=ax,
show_confidence=False,
label_func=lambda x: None,
xticks=([],), yticks=([],),
ylabel=('',), xlabel=('',),
xlim=(-0.01,mdist+0.01),
axis=('off',),
title=('parSNP tree\n(%d strains)'%roary.shape[1],),
do_show=False,
)
plt.savefig('pangenome_matrix.png')
plt.clf()
# Plot the pangenome pie chart
plt.figure(figsize=(10, 10))
core = roary[(roary.sum(axis=1) >= roary.shape[1]*0.99) & (roary.sum(axis=1) <= roary.shape[1] )].shape[0]
softcore = roary[(roary.sum(axis=1) >= roary.shape[1]*0.95) & (roary.sum(axis=1) < roary.shape[1]*0.99)].shape[0]
shell = roary[(roary.sum(axis=1) >= roary.shape[1]*0.15) & (roary.sum(axis=1) < roary.shape[1]*0.95)].shape[0]
cloud = roary[roary.sum(axis=1) < roary.shape[1]*0.15].shape[0]
total = roary.shape[0]
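    # Pangenome categories as defined above: core >= 99% of strains,
    # soft-core 95-99%, shell 15-95%, cloud < 15%.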
def my_autopct(pct):
val=int(round(pct*total/100.0))
return '{v:d}'.format(v=val)
a=plt.pie([core, softcore, shell, cloud],
labels=['core\n(%d <= strains <= %d)'%(roary.shape[1]*.99,roary.shape[1]),
'soft-core\n(%d <= strains < %d)'%(roary.shape[1]*.95,roary.shape[1]*.99),
'shell\n(%d <= strains < %d)'%(roary.shape[1]*.15,roary.shape[1]*.95),
'cloud\n(strains < %d)'%(roary.shape[1]*.15)],
explode=[0.1, 0.05, 0.02, 0], radius=0.9,
colors=[(0, 0, 1, float(x)/total) for x in (core, softcore, shell, cloud)],
autopct=my_autopct)
plt.savefig('pangenome_pie.png')
plt.clf()
| gpl-3.0 |
nrhine1/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
hantek/BinaryConnect | mnist.py | 1 | 6258 | # Copyright 2015 Matthieu Courbariaux, Zhouhan Lin
"""
This file is adapted from BinaryConnect:
https://github.com/MatthieuCourbariaux/BinaryConnect
Running this script should reproduce the results of a feed forward net trained
on MNIST.
To train a vanilla feed forward net with ordinary backprop:
1. type "git checkout fullresolution" to switch to the "fullresolution" branch
2. execute "python mnist.py"
To train a feed forward net with Binary Connect + quantized backprop:
1. type "git checkout binary" to switch to the "binary" branch
2. execute "python mnist.py"
To train a feed forward net with Ternary Connect + quantized backprop:
1. type "git checkout ternary" to switch to the "ternary" branch
2. execute "python mnist.py"
"""
import gzip
import cPickle
import numpy as np
import os
import os.path
import sys
import time
from trainer import Trainer
from model import Network
from layer import linear_layer, ReLU_layer
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
if __name__ == "__main__":
rng = np.random.RandomState(1234)
train_set_size = 50000
# data augmentation
zero_pad = 0
affine_transform_a = 0
affine_transform_b = 0
horizontal_flip = False
# batch
# keep a multiple a factor of 10000 if possible
# 10000 = (2*5)^4
batch_size = 200
number_of_batches_on_gpu = train_set_size/batch_size
BN = True
BN_epsilon=1e-4 # for numerical stability
BN_fast_eval= True
dropout_input = 1.
dropout_hidden = 1.
shuffle_examples = True
shuffle_batches = False
# Termination criteria
n_epoch = 1000
monitor_step = 2
# LR
LR = .3
LR_fin = .01
LR_decay = (LR_fin/LR)**(1./n_epoch)
M= 0.
# architecture
n_inputs = 784
n_units = 1024
n_classes = 10
n_hidden_layer = 3
# BinaryConnect
BinaryConnect = True
stochastic = True
# Old hyperparameters
binary_training=False
stochastic_training=False
binary_test=False
stochastic_test=False
if BinaryConnect == True:
binary_training=True
if stochastic == True:
stochastic_training=True
else:
binary_test=True
print 'Loading the dataset'
train_set = MNIST(which_set= 'train', start=0, stop = train_set_size, center = True)
valid_set = MNIST(which_set= 'train', start=50000, stop = 60000, center = True)
test_set = MNIST(which_set= 'test', center = True)
# bc01 format
train_set.X = train_set.X.reshape(train_set_size,1,28,28)
valid_set.X = valid_set.X.reshape(10000,1,28,28)
test_set.X = test_set.X.reshape(10000,1,28,28)
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.float32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2* train_set.y - 1.
valid_set.y = 2* valid_set.y - 1.
test_set.y = 2* test_set.y - 1.
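    # The L2-SVM output layer below uses a hinge loss, which expects targets in
    # {-1, +1}; the 2*y - 1 remapping above converts the one-hot targets accordingly.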
print 'Creating the model'
class PI_MNIST_model(Network):
def __init__(self, rng):
Network.__init__(self, n_hidden_layer = n_hidden_layer, BN = BN)
print " Fully connected layer 1:"
self.layer.append(ReLU_layer(rng = rng, n_inputs = n_inputs, n_units = n_units,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_input,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
for k in range(n_hidden_layer-1):
print " Fully connected layer "+ str(k) +":"
self.layer.append(ReLU_layer(rng = rng, n_inputs = n_units, n_units = n_units,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_hidden,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
print " L2 SVM layer:"
self.layer.append(linear_layer(rng = rng, n_inputs = n_units, n_units = n_classes,
BN = BN, BN_epsilon=BN_epsilon, dropout=dropout_hidden,
binary_training=binary_training, stochastic_training=stochastic_training,
binary_test=binary_test, stochastic_test=stochastic_test))
model = PI_MNIST_model(rng = rng)
print 'Creating the trainer'
trainer = Trainer(rng = rng,
train_set = train_set, valid_set = valid_set, test_set = test_set,
model = model, load_path = None, save_path = None,
zero_pad=zero_pad,
affine_transform_a=affine_transform_a, # a is (more or less) the rotations
affine_transform_b=affine_transform_b, # b is the translations
horizontal_flip=horizontal_flip,
LR = LR, LR_decay = LR_decay, LR_fin = LR_fin,
M = M,
BN = BN, BN_fast_eval=BN_fast_eval,
batch_size = batch_size, number_of_batches_on_gpu = number_of_batches_on_gpu,
n_epoch = n_epoch, monitor_step = monitor_step,
shuffle_batches = shuffle_batches, shuffle_examples = shuffle_examples)
print 'Building'
trainer.build()
print 'Training'
start_time = time.clock()
trainer.train()
end_time = time.clock()
print 'The training took %i seconds'%(end_time - start_time)
print 'Display weights'
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from filter_plot import tile_raster_images
W = np.transpose(model.layer[0].W.get_value())
W = tile_raster_images(W,(28,28),(4,4),(2, 2))
plt.imshow(W, cmap = cm.Greys_r)
    plt.savefig('mnist_features.png')
| gpl-2.0 |
bmcfee/librosa | librosa/util/utils.py | 1 | 64787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions"""
import warnings
import scipy.ndimage
import scipy.sparse
import numpy as np
import numba
from numpy.lib.stride_tricks import as_strided
from .._cache import cache
from .exceptions import ParameterError
# Constrain STFT block sizes to 256 KB
MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10
__all__ = [
"MAX_MEM_BLOCK",
"frame",
"pad_center",
"fix_length",
"valid_audio",
"valid_int",
"valid_intervals",
"fix_frames",
"axis_sort",
"localmax",
"localmin",
"normalize",
"peak_pick",
"sparsify_rows",
"shear",
"stack",
"fill_off_diagonal",
"index_to_slice",
"sync",
"softmask",
"buf_to_float",
"tiny",
"cyclic_gradient",
"dtype_r2c",
"dtype_c2r",
]
def frame(x, frame_length, hop_length, axis=-1):
"""Slice a data array into (overlapping) frames.
This implementation uses low-level stride manipulation to avoid
making a copy of the data. The resulting frame representation
is a new view of the same input data.
However, if the input data is not contiguous in memory, a warning
will be issued and the output will be a full copy, rather than
a view of the input data.
For example, a one-dimensional input ``x = [0, 1, 2, 3, 4, 5, 6]``
can be framed with frame length 3 and hop length 2 in two ways.
The first (``axis=-1``), results in the array ``x_frames``::
[[0, 2, 4],
[1, 3, 5],
[2, 4, 6]]
where each column ``x_frames[:, i]`` contains a contiguous slice of
the input ``x[i * hop_length : i * hop_length + frame_length]``.
The second way (``axis=0``) results in the array ``x_frames``::
[[0, 1, 2],
[2, 3, 4],
[4, 5, 6]]
where each row ``x_frames[i]`` contains a contiguous slice of the input.
This generalizes to higher dimensional inputs, as shown in the examples below.
In general, the framing operation increments by 1 the number of dimensions,
adding a new "frame axis" either to the end of the array (``axis=-1``)
or the beginning of the array (``axis=0``).
Parameters
----------
x : np.ndarray
Array to frame
frame_length : int > 0 [scalar]
Length of the frame
hop_length : int > 0 [scalar]
Number of steps to advance between frames
axis : 0 or -1
The axis along which to frame.
If ``axis=-1`` (the default), then ``x`` is framed along its last dimension.
``x`` must be "F-contiguous" in this case.
If ``axis=0``, then ``x`` is framed along its first dimension.
``x`` must be "C-contiguous" in this case.
Returns
-------
x_frames : np.ndarray [shape=(..., frame_length, N_FRAMES) or (N_FRAMES, frame_length, ...)]
A framed view of ``x``, for example with ``axis=-1`` (framing on the last dimension)::
x_frames[..., j] == x[..., j * hop_length : j * hop_length + frame_length]
If ``axis=0`` (framing on the first dimension), then::
x_frames[j] = x[j * hop_length : j * hop_length + frame_length]
Raises
------
ParameterError
If ``x`` is not an `np.ndarray`.
If ``x.shape[axis] < frame_length``, there is not enough data to fill one frame.
If ``hop_length < 1``, frames cannot advance.
If ``axis`` is not 0 or -1. Framing is only supported along the first or last axis.
See Also
--------
numpy.asfortranarray : Convert data to F-contiguous representation
numpy.ascontiguousarray : Convert data to C-contiguous representation
numpy.ndarray.flags : information about the memory layout of a numpy `ndarray`.
Examples
--------
Extract 2048-sample frames from monophonic signal with a hop of 64 samples per frame
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
>>> frames
array([[-1.407e-03, -2.604e-02, ..., -1.795e-05, -8.108e-06],
[-4.461e-04, -3.721e-02, ..., -1.573e-05, -1.652e-05],
...,
[ 7.960e-02, -2.335e-01, ..., -6.815e-06, 1.266e-05],
[ 9.568e-02, -1.252e-01, ..., 7.397e-06, -1.921e-05]],
dtype=float32)
>>> y.shape
(117601,)
>>> frames.shape
(2048, 1806)
Or frame along the first axis instead of the last:
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64, axis=0)
>>> frames.shape
(1806, 2048)
Frame a stereo signal:
>>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False)
>>> y.shape
(2, 117601)
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
    >>> frames.shape
    (2, 2048, 1806)
Carve an STFT into fixed-length patches of 32 frames with 50% overlap
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> S.shape
(1025, 230)
>>> S_patch = librosa.util.frame(S, frame_length=32, hop_length=16)
>>> S_patch.shape
(1025, 32, 13)
>>> # The first patch contains the first 32 frames of S
>>> np.allclose(S_patch[:, :, 0], S[:, :32])
True
>>> # The second patch contains frames 16 to 16+32=48, and so on
>>> np.allclose(S_patch[:, :, 1], S[:, 16:48])
True
"""
if not isinstance(x, np.ndarray):
raise ParameterError(
"Input must be of type numpy.ndarray, " "given type(x)={}".format(type(x))
)
if x.shape[axis] < frame_length:
raise ParameterError(
"Input is too short (n={:d})"
" for frame_length={:d}".format(x.shape[axis], frame_length)
)
if hop_length < 1:
raise ParameterError("Invalid hop_length: {:d}".format(hop_length))
if axis == -1 and not x.flags["F_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.asfortranarray(x)
elif axis == 0 and not x.flags["C_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.ascontiguousarray(x)
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
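    # number of complete frames that fit: every frame must end within the input,
    # so frame j covers samples [j * hop_length, j * hop_length + frame_length)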
strides = np.asarray(x.strides)
new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize
if axis == -1:
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * new_stride]
elif axis == 0:
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * new_stride] + list(strides)
else:
raise ParameterError("Frame axis={} must be either 0 or -1".format(axis))
return as_strided(x, shape=shape, strides=strides)
@cache(level=20)
def valid_audio(y, mono=True):
"""Determine whether a variable contains valid audio data.
If ``mono=True``, then ``y`` is only considered valid if it has shape
``(N,)`` (number of samples).
If ``mono=False``, then ``y`` may be either monophonic, or have shape
``(2, N)`` (stereo) or ``(K, N)`` for ``K>=2`` for general multi-channel.
Parameters
----------
y : np.ndarray
The input data to validate
mono : bool
Whether or not to require monophonic audio
Returns
-------
valid : bool
True if all tests pass
Raises
------
ParameterError
In any of these cases:
- ``type(y)`` is not ``np.ndarray``
- ``y.dtype`` is not floating-point
- ``mono == True`` and ``y.ndim`` is not 1
- ``mono == False`` and ``y.ndim`` is not 1 or 2
- ``mono == False`` and ``y.ndim == 2`` but ``y.shape[0] == 1``
- ``np.isfinite(y).all()`` is False
Notes
-----
This function caches at level 20.
Examples
--------
>>> # By default, valid_audio allows only mono signals
>>> filepath = librosa.ex('trumpet', hq=True)
>>> y_mono, sr = librosa.load(filepath, mono=True)
>>> y_stereo, _ = librosa.load(filepath, mono=False)
>>> librosa.util.valid_audio(y_mono), librosa.util.valid_audio(y_stereo)
True, False
>>> # To allow stereo signals, set mono=False
>>> librosa.util.valid_audio(y_stereo, mono=False)
True
See also
--------
numpy.float32
"""
if not isinstance(y, np.ndarray):
raise ParameterError("Audio data must be of type numpy.ndarray")
if not np.issubdtype(y.dtype, np.floating):
raise ParameterError("Audio data must be floating-point")
if mono and y.ndim != 1:
raise ParameterError(
"Invalid shape for monophonic audio: "
"ndim={:d}, shape={}".format(y.ndim, y.shape)
)
elif y.ndim > 2 or y.ndim == 0:
raise ParameterError(
"Audio data must have shape (samples,) or (channels, samples). "
"Received shape={}".format(y.shape)
)
elif y.ndim == 2 and y.shape[0] < 2:
raise ParameterError(
"Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
)
if not np.isfinite(y).all():
raise ParameterError("Audio buffer is not finite everywhere")
return True
def valid_int(x, cast=None):
"""Ensure that an input value is integer-typed.
    This is primarily useful for ensuring integer-valued
array indices.
Parameters
----------
x : number
A scalar value to be cast to int
cast : function [optional]
A function to modify ``x`` before casting.
Default: `np.floor`
Returns
-------
x_int : int
``x_int = int(cast(x))``
Raises
------
ParameterError
If ``cast`` is provided and is not callable.
"""
if cast is None:
cast = np.floor
if not callable(cast):
raise ParameterError("cast parameter must be callable")
return int(cast(x))
def valid_intervals(intervals):
"""Ensure that an array is a valid representation of time intervals:
- intervals.ndim == 2
- intervals.shape[1] == 2
- intervals[i, 0] <= intervals[i, 1] for all i
Parameters
----------
intervals : np.ndarray [shape=(n, 2)]
set of time intervals
Returns
-------
valid : bool
True if ``intervals`` passes validation.
"""
if intervals.ndim != 2 or intervals.shape[-1] != 2:
raise ParameterError("intervals must have shape (n, 2)")
if np.any(intervals[:, 0] > intervals[:, 1]):
raise ParameterError(
"intervals={} must have non-negative durations".format(intervals)
)
return True
def pad_center(data, size, axis=-1, **kwargs):
"""Pad an array to a target length along a target axis.
This differs from `np.pad` by centering the data prior to padding,
analogous to `str.center`
Examples
--------
>>> # Generate a vector
>>> data = np.ones(5)
>>> librosa.util.pad_center(data, 10, mode='constant')
array([ 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.])
>>> # Pad a matrix along its first dimension
>>> data = np.ones((3, 5))
>>> librosa.util.pad_center(data, 7, axis=0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # Or its second dimension
>>> librosa.util.pad_center(data, 7, axis=1)
array([[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.]])
Parameters
----------
data : np.ndarray
Vector to be padded and centered
size : int >= len(data) [scalar]
Length to pad ``data``
axis : int
Axis along which to pad and center the data
kwargs : additional keyword arguments
arguments passed to `np.pad`
Returns
-------
data_padded : np.ndarray
``data`` centered and padded to length ``size`` along the
specified axis
Raises
------
ParameterError
If ``size < data.shape[axis]``
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
lpad = int((size - n) // 2)
lengths = [(0, 0)] * data.ndim
lengths[axis] = (lpad, int(size - n - lpad))
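    # lpad values go before the data and the remaining (size - n - lpad) after,
    # so the original n samples end up centered along the padded axis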
if lpad < 0:
raise ParameterError(
("Target size ({:d}) must be " "at least input size ({:d})").format(size, n)
)
return np.pad(data, lengths, **kwargs)
def fix_length(data, size, axis=-1, **kwargs):
"""Fix the length an array ``data`` to exactly ``size`` along a target axis.
If ``data.shape[axis] < n``, pad according to the provided kwargs.
By default, ``data`` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to ``np.pad``
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
``data`` either trimmed or padded to length ``size``
along the specified axis.
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
def fix_frames(frames, x_min=0, x_max=None, pad=True):
"""Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If ``True``, then ``frames`` is expanded to span the full range
``[x_min, x_max]``
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If ``frames`` contains negative values
"""
frames = np.asarray(frames)
if np.any(frames < 0):
raise ParameterError("Negative frame index detected")
if pad and (x_min is not None or x_max is not None):
frames = np.clip(frames, x_min, x_max)
if pad:
pad_data = []
if x_min is not None:
pad_data.append(x_min)
if x_max is not None:
pad_data.append(x_max)
frames = np.concatenate((pad_data, frames))
if x_min is not None:
frames = frames[frames >= x_min]
if x_max is not None:
frames = frames[frames <= x_max]
return np.unique(frames).astype(int)
def axis_sort(S, axis=-1, index=False, value=None):
"""Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=64)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, ncols=2)
>>> img_w = librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log', ax=ax[0, 0])
>>> ax[0, 0].set(title='W')
>>> ax[0, 0].label_outer()
>>> img_act = librosa.display.specshow(H, x_axis='time', ax=ax[0, 1])
>>> ax[0, 1].set(title='H')
>>> ax[0, 1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log', ax=ax[1, 0])
>>> ax[1, 0].set(title='W sorted')
>>> librosa.display.specshow(H_sort, x_axis='time', ax=ax[1, 1])
>>> ax[1, 1].set(title='H sorted')
>>> ax[1, 1].label_outer()
>>> fig.colorbar(img_w, ax=ax[:, 0], orientation='horizontal')
>>> fig.colorbar(img_act, ax=ax[:, 1], orientation='horizontal')
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- ``axis=0`` to sort rows by peak column index
- ``axis=1`` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
``S`` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If ``index == True``, the sorting index used to permute ``S``.
Length of ``idx`` corresponds to the selected ``axis``.
Raises
------
ParameterError
If ``S`` does not have exactly 2 dimensions (``S.ndim != 2``)
"""
if value is None:
value = np.argmax
if S.ndim != 2:
raise ParameterError("axis_sort is only defined for 2D arrays")
bin_idx = value(S, axis=np.mod(1 - axis, S.ndim))
idx = np.argsort(bin_idx)
sort_slice = [slice(None)] * S.ndim
sort_slice[axis] = idx
if index:
return S[tuple(sort_slice)], idx
else:
return S[tuple(sort_slice)]
@cache(level=40)
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
"""Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that::
norm(S, axis=axis) == 1
For example, ``axis=0`` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, ``axis=1`` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
``threshold`` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
        - `-np.inf` : minimum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least ``threshold`` are
normalized.
By default, the threshold is determined from
the numerical precision of ``S.dtype``.
fill : None or bool
If None, then columns (or rows) with norm below ``threshold``
are left as is.
If False, then columns (rows) with norm below ``threshold``
are set to 0.
If True, then columns (rows) with norm below ``threshold``
are filled uniformly such that the corresponding norm is 1.
.. note:: ``fill=True`` is incompatible with ``norm=0`` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If ``norm`` is not among the valid types defined above
If ``S`` is not finite
If ``fill=True`` and ``norm=0``
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
"""
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise ParameterError(
"threshold={} must be strictly " "positive".format(threshold)
)
if fill not in [None, False, True]:
raise ParameterError("fill={} must be None or boolean".format(fill))
if not np.all(np.isfinite(S)):
raise ParameterError("Input must be finite")
# All norms only depend on magnitude, let's do that first
    mag = np.abs(S).astype(float)
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise ParameterError("Cannot normalize with norm=0 and fill=True")
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag ** norm, axis=axis, keepdims=True) ** (1.0 / norm)
if axis is None:
fill_norm = mag.size ** (-1.0 / norm)
else:
fill_norm = mag.shape[axis] ** (-1.0 / norm)
elif norm is None:
return S
else:
raise ParameterError("Unsupported norm: {}".format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
def localmax(x, axis=0):
"""Find local maxima in an array
An element ``x[i]`` is considered a local maximum if the following
conditions are met:
- ``x[i] > x[i-1]``
- ``x[i] >= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along ``axis``
See Also
--------
localmin
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)])
def localmin(x, axis=0):
"""Find local minima in an array
An element ``x[i]`` is considered a local minimum if the following
conditions are met:
- ``x[i] < x[i-1]``
- ``x[i] <= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local minimum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmin(x)
array([False, True, False, False, True, False, True, False])
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmin(x, axis=0)
array([[False, False, False],
[False, True, True],
[False, False, False]])
>>> librosa.util.localmin(x, axis=1)
array([[False, True, False],
[False, True, False],
[False, True, False]])
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local minimality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local minimality along ``axis``
See Also
--------
localmax
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x < x_pad[tuple(inds1)]) & (x <= x_pad[tuple(inds2)])
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
"""Uses a flexible heuristic to pick peaks in a signal.
    A sample n is selected as a peak if the corresponding ``x[n]``
fulfills the following three conditions:
1. ``x[n] == max(x[n - pre_max:n + post_max])``
2. ``x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta``
3. ``n - previous_n > wait``
where ``previous_n`` is the last sample picked as a peak (greedily).
This implementation is based on [#]_ and [#]_.
.. [#] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [#] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
        input signal to pick peaks from
pre_max : int >= 0 [scalar]
number of samples before ``n`` over which max is computed
post_max : int >= 1 [scalar]
number of samples after ``n`` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before ``n`` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after ``n`` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in ``x``
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 3, 27, 40, 61, 72, 88, 103])
>>> import matplotlib.pyplot as plt
>>> times = librosa.times_like(onset_env, sr=sr, hop_length=512)
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> D = np.abs(librosa.stft(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[1])
>>> ax[0].plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> ax[0].vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> ax[0].legend(frameon=True, framealpha=0.8)
>>> ax[0].label_outer()
"""
if pre_max < 0:
raise ParameterError("pre_max must be non-negative")
if pre_avg < 0:
raise ParameterError("pre_avg must be non-negative")
if delta < 0:
raise ParameterError("delta must be non-negative")
if wait < 0:
raise ParameterError("wait must be non-negative")
if post_max <= 0:
raise ParameterError("post_max must be positive")
if post_avg <= 0:
raise ParameterError("post_avg must be positive")
if x.ndim != 1:
raise ParameterError("input array must be one-dimensional")
# Ensure valid index types
pre_max = valid_int(pre_max, cast=np.ceil)
post_max = valid_int(post_max, cast=np.ceil)
pre_avg = valid_int(pre_avg, cast=np.ceil)
post_avg = valid_int(post_avg, cast=np.ceil)
wait = valid_int(wait, cast=np.ceil)
# Get the maximum of the signal over a sliding window
max_length = pre_max + post_max
max_origin = np.ceil(0.5 * (pre_max - post_max))
# Using mode='constant' and cval=x.min() effectively truncates
# the sliding window at the boundaries
mov_max = scipy.ndimage.filters.maximum_filter1d(
x, int(max_length), mode="constant", origin=int(max_origin), cval=x.min()
)
# Get the mean of the signal over a sliding window
avg_length = pre_avg + post_avg
avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
# Here, there is no mode which results in the behavior we want,
# so we'll correct below.
mov_avg = scipy.ndimage.filters.uniform_filter1d(
x, int(avg_length), mode="nearest", origin=int(avg_origin)
)
# Correct sliding average at the beginning
n = 0
# Only need to correct in the range where the window needs to be truncated
while n - pre_avg < 0 and n < x.shape[0]:
# This just explicitly does mean(x[n - pre_avg:n + post_avg])
# with truncation
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# Correct sliding average at the end
n = x.shape[0] - post_avg
# When post_avg > x.shape[0] (weird case), reset to 0
n = n if n > 0 else 0
while n < x.shape[0]:
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# First mask out all entries not equal to the local max
detections = x * (x == mov_max)
# Then mask out all entries less than the thresholded average
detections = detections * (detections >= (mov_avg + delta))
# Initialize peaks array, to be filled greedily
peaks = []
# Remove onsets which are close together in time
last_onset = -np.inf
for i in np.nonzero(detections)[0]:
        # Only report an onset if at least `wait` samples have passed since the last one
if i > last_onset + wait:
peaks.append(i)
# Save last reported onset
last_onset = i
return np.array(peaks)
@cache(level=40)
def sparsify_rows(x, quantile=0.01, dtype=None):
"""Return a row-sparse matrix approximating the input
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of ``x``
dtype : np.dtype, optional
The dtype of the output array.
If not provided, then ``x.dtype`` will be used.
Returns
-------
x_sparse : ``scipy.sparse.csr_matrix`` [shape=x.shape]
Row-sparsified approximation of ``x``
If ``x.ndim == 1``, then ``x`` is interpreted as a row vector,
and ``x_sparse.shape == (1, len(x))``.
Raises
------
ParameterError
If ``x.ndim > 2``
If ``quantile`` lies outside ``[0, 1.0)``
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
"""
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError(
"Input must have 2 or fewer dimensions. "
"Provided x.shape={}.".format(x.shape)
)
if not 0.0 <= quantile < 1:
raise ParameterError("Invalid quantile {:.2f}".format(quantile))
if dtype is None:
dtype = x.dtype
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr()
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in ``x``
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
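    Examples
    --------
    A minimal sketch, assuming a buffer of little-endian 16-bit integers
    (the layout typically produced by PCM wav data):
    >>> buf = np.array([0, 16384, -32768], dtype='<i2').tobytes()
    >>> librosa.util.buf_to_float(buf, n_bytes=2)
    array([ 0. ,  0.5, -1. ], dtype=float32)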
"""
# Invert the scale of the data
scale = 1.0 / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = "<i{:d}".format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
"""Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min, idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad ``idx`` to span the range ``idx_min:idx_max``.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
``idx_min`` and ``idx_max``.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
"""
# First, normalize the index set
idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad)
# Now convert the indices to slices
return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])]
@cache(level=40)
def sync(data, idx, aggregate=None, pad=True, axis=-1):
"""Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to ``idx``.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, ``idx`` is padded to span the full range ``[0, data.shape[axis]]``
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
``data_sync`` will have the same dimension as ``data``, except that the ``axis``
coordinate will be reduced according to ``idx``.
For example, a 2-dimensional ``data`` with ``axis=-1`` should satisfy::
data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.ex('choice'))
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> fig, ax = plt.subplots(nrows=3, sharex=True, sharey=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time', ax=ax[0])
>>> ax[0].set(title='CQT power, shape={}'.format(C.shape))
>>> ax[0].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time', ax=ax[1])
>>> ax[1].set(title='Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> ax[1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time', ax=ax[2])
>>> ax[2].set(title='Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
"""
if aggregate is None:
aggregate = np.mean
shape = list(data.shape)
if np.all([isinstance(_, slice) for _ in idx]):
slices = idx
elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]):
slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
else:
raise ParameterError("Invalid index set: {}".format(idx))
agg_shape = list(shape)
agg_shape[axis] = len(slices)
data_agg = np.empty(
agg_shape, order="F" if np.isfortran(data) else "C", dtype=data.dtype
)
idx_in = [slice(None)] * data.ndim
idx_agg = [slice(None)] * data_agg.ndim
for (i, segment) in enumerate(slices):
idx_in[axis] = segment
idx_agg[axis] = i
data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis)
return data_agg
def softmask(X, X_ref, power=1, split_zeros=False):
"""Robustly compute a soft-mask operation.
``M = X**power / (X**power + X_ref**power)``
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as ``X``.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to ``X > X_ref``.
Note: for hard masks, ties are always broken in favor of ``X_ref`` (``mask=0``).
split_zeros : bool
If `True`, entries where ``X`` and ``X_ref`` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=X.shape
The output mask array
Raises
------
ParameterError
If ``X`` and ``X_ref`` have different shapes.
If ``X`` or ``X_ref`` are negative anywhere
If ``power <= 0``
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool)
"""
if X.shape != X_ref.shape:
raise ParameterError("Shape mismatch: {}!={}".format(X.shape, X_ref.shape))
if np.any(X < 0) or np.any(X_ref < 0):
raise ParameterError("X and X_ref must be non-negative")
if power <= 0:
raise ParameterError("power must be strictly positive")
# We're working with ints, cast to float.
dtype = X.dtype
if not np.issubdtype(dtype, np.floating):
dtype = np.float32
# Re-scale the input arrays relative to the larger value
Z = np.maximum(X, X_ref).astype(dtype)
bad_idx = Z < np.finfo(dtype).tiny
Z[bad_idx] = 1
# For finite power, compute the softmask
if np.isfinite(power):
mask = (X / Z) ** power
ref_mask = (X_ref / Z) ** power
good_idx = ~bad_idx
mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
# Wherever energy is below energy in both inputs, split the mask
if split_zeros:
mask[bad_idx] = 0.5
else:
mask[bad_idx] = 0.0
else:
# Otherwise, compute the hard mask
mask = X > X_ref
return mask
def tiny(x):
"""Compute the tiny-value corresponding to an input's data type.
This is the smallest "usable" number representable in ``x.dtype``
(e.g., float32).
This is primarily useful for determining a threshold for
numerical underflow in division or multiplication operations.
Parameters
----------
x : number or np.ndarray
The array to compute the tiny-value for.
All that matters here is ``x.dtype``
Returns
-------
tiny_value : float
The smallest positive usable number for the type of ``x``.
If ``x`` is integer-typed, then the tiny value for ``np.float32``
is returned instead.
See Also
--------
numpy.finfo
Examples
--------
For a standard double-precision floating point number:
>>> librosa.util.tiny(1.0)
2.2250738585072014e-308
Or explicitly as double-precision
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64))
2.2250738585072014e-308
Or complex numbers
>>> librosa.util.tiny(1j)
2.2250738585072014e-308
Single-precision floating point:
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32))
1.1754944e-38
Integer
>>> librosa.util.tiny(5)
1.1754944e-38
"""
# Make sure we have an array view
x = np.asarray(x)
# Only floating types generate a tiny
if np.issubdtype(x.dtype, np.floating) or np.issubdtype(
x.dtype, np.complexfloating
):
dtype = x.dtype
else:
dtype = np.float32
return np.finfo(dtype).tiny
def fill_off_diagonal(x, radius, value=0):
"""Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
When ``x.shape[0] != x.shape[1]``, the radius will be
expanded so that ``x[-1, -1] = 1`` always.
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
"""
    nx, ny = x.shape
    # Calculate the radius in indices, rather than proportion
    radius = np.round(radius * np.min(x.shape))
offset = np.abs((x.shape[0] - x.shape[1]))
if nx < ny:
idx_u = np.triu_indices_from(x, k=radius + offset)
idx_l = np.tril_indices_from(x, k=-radius)
else:
idx_u = np.triu_indices_from(x, k=radius)
idx_l = np.tril_indices_from(x, k=-radius - offset)
# modify input matrix
x[idx_u] = value
x[idx_l] = value
def cyclic_gradient(data, edge_order=1, axis=-1):
"""Estimate the gradient of a function over a uniformly sampled,
periodic domain.
This is essentially the same as `np.gradient`, except that edge effects
are handled by wrapping the observations (i.e. assuming periodicity)
rather than extrapolation.
Parameters
----------
data : np.ndarray
The function values observed at uniformly spaced positions on
a periodic domain
edge_order: {1, 2}
The order of the difference approximation used for estimating
the gradient
axis : int
The axis along which gradients are calculated.
Returns
-------
grad : np.ndarray like ``data``
The gradient of ``data`` taken along the specified axis.
See Also
--------
numpy.gradient
Examples
--------
This example estimates the gradient of cosine (-sine) from 64
samples using direct (aperiodic) and periodic gradient
calculation.
>>> import matplotlib.pyplot as plt
>>> x = 2 * np.pi * np.linspace(0, 1, num=64, endpoint=False)
>>> y = np.cos(x)
>>> grad = np.gradient(y)
>>> cyclic_grad = librosa.util.cyclic_gradient(y)
>>> true_grad = -np.sin(x) * 2 * np.pi / len(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, true_grad, label='True gradient', linewidth=5,
... alpha=0.35)
>>> ax.plot(x, cyclic_grad, label='cyclic_gradient')
>>> ax.plot(x, grad, label='np.gradient', linestyle=':')
>>> ax.legend()
>>> # Zoom into the first part of the sequence
>>> ax.set(xlim=[0, np.pi/16], ylim=[-0.025, 0.025])
"""
# Wrap-pad the data along the target axis by `edge_order` on each side
padding = [(0, 0)] * data.ndim
padding[axis] = (edge_order, edge_order)
data_pad = np.pad(data, padding, mode="wrap")
# Compute the gradient
grad = np.gradient(data_pad, edge_order=edge_order, axis=axis)
# Remove the padding
slices = [slice(None)] * data.ndim
slices[axis] = slice(edge_order, -edge_order)
return grad[tuple(slices)]
@numba.jit(nopython=True, cache=True)
def __shear_dense(X, factor=+1, axis=-1):
"""Numba-accelerated shear for dense (ndarray) arrays"""
if axis == 0:
X = X.T
X_shear = np.empty_like(X)
for i in range(X.shape[1]):
X_shear[:, i] = np.roll(X[:, i], factor * i)
if axis == 0:
X_shear = X_shear.T
return X_shear
def __shear_sparse(X, factor=+1, axis=-1):
"""Fast shearing for sparse matrices
Shearing is performed using CSC array indices,
and the result is converted back to whatever sparse format
the data was originally provided in.
"""
fmt = X.format
if axis == 0:
X = X.T
# Now we're definitely rolling on the correct axis
X_shear = X.tocsc(copy=True)
# The idea here is to repeat the shear amount (factor * range)
# by the number of non-zeros for each column.
# The number of non-zeros is computed by diffing the index pointer array
roll = np.repeat(factor * np.arange(X_shear.shape[1]), np.diff(X_shear.indptr))
# In-place roll
np.mod(X_shear.indices + roll, X_shear.shape[0], out=X_shear.indices)
if axis == 0:
X_shear = X_shear.T
# And convert back to the input format
return X_shear.asformat(fmt)
def shear(X, factor=1, axis=-1):
"""Shear a matrix by a given factor.
The column ``X[:, n]`` will be displaced (rolled)
by ``factor * n``
This is primarily useful for converting between lag and recurrence
representations: shearing with ``factor=-1`` converts the main diagonal
to a horizontal. Shearing with ``factor=1`` converts a horizontal to
a diagonal.
Parameters
----------
X : np.ndarray [ndim=2] or scipy.sparse matrix
The array to be sheared
factor : integer
The shear factor: ``X[:, n] -> np.roll(X[:, n], factor * n)``
axis : integer
The axis along which to shear
Returns
-------
X_shear : same type as ``X``
The sheared matrix
Examples
--------
>>> E = np.eye(3)
>>> librosa.util.shear(E, factor=-1, axis=-1)
array([[1., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]])
>>> librosa.util.shear(E, factor=-1, axis=0)
array([[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]])
>>> librosa.util.shear(E, factor=1, axis=-1)
array([[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]])
"""
if not np.issubdtype(type(factor), np.integer):
raise ParameterError("factor={} must be integer-valued".format(factor))
if scipy.sparse.isspmatrix(X):
return __shear_sparse(X, factor=factor, axis=axis)
else:
return __shear_dense(X, factor=factor, axis=axis)
def stack(arrays, axis=0):
"""Stack one or more arrays along a target axis.
This function is similar to `np.stack`, except that memory contiguity is
retained when stacking along the first dimension.
This is useful when combining multiple monophonic audio signals into a
multi-channel signal, or when stacking multiple feature representations
to form a multi-dimensional array.
Parameters
----------
arrays : list
one or more `np.ndarray`
axis : integer
The target axis along which to stack. ``axis=0`` creates a new first axis,
and ``axis=-1`` creates a new last axis.
Returns
-------
arr_stack : np.ndarray [shape=(len(arrays), array_shape) or shape=(array_shape, len(arrays))]
The input arrays, stacked along the target dimension.
If ``axis=0``, then ``arr_stack`` will be F-contiguous.
Otherwise, ``arr_stack`` will be C-contiguous by default, as computed by
`np.stack`.
Raises
------
ParameterError
- If ``arrays`` do not all have the same shape
- If no ``arrays`` are given
See Also
--------
numpy.stack
numpy.ndarray.flags
frame
Examples
--------
    Combine two buffers into a contiguous array
>>> y_left = np.ones(5)
>>> y_right = -np.ones(5)
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=0)
>>> y_stereo
array([[ 1., 1., 1., 1., 1.],
[-1., -1., -1., -1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
Or along the trailing axis
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=-1)
>>> y_stereo
array([[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
"""
shapes = {arr.shape for arr in arrays}
if len(shapes) > 1:
raise ParameterError("all input arrays must have the same shape")
elif len(shapes) < 1:
raise ParameterError("at least one input array must be provided for stack")
shape_in = shapes.pop()
if axis != 0:
return np.stack(arrays, axis=axis)
else:
# If axis is 0, enforce F-ordering
shape = tuple([len(arrays)] + list(shape_in))
# Find the common dtype for all inputs
dtype = np.find_common_type([arr.dtype for arr in arrays], [])
# Allocate an empty array of the right shape and type
result = np.empty(shape, dtype=dtype, order="F")
# Stack into the preallocated buffer
np.stack(arrays, axis=axis, out=result)
return result
def dtype_r2c(d, default=np.complex64):
"""Find the complex numpy dtype corresponding to a real dtype.
This is used to maintain numerical precision and memory footprint
when constructing complex arrays from real-valued data
(e.g. in a Fourier transform).
A `float32` (single-precision) type maps to `complex64`,
while a `float64` (double-precision) maps to `complex128`.
Parameters
----------
d : np.dtype
The real-valued dtype to convert to complex.
If ``d`` is a complex type already, it will be returned.
default : np.dtype, optional
The default complex target type, if ``d`` does not match a
known dtype
Returns
-------
d_c : np.dtype
The complex dtype
See Also
--------
dtype_c2r
numpy.dtype
Examples
--------
>>> librosa.util.dtype_r2c(np.float32)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.int16)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.complex128)
dtype('complex128')
"""
mapping = {
np.dtype(np.float32): np.complex64,
np.dtype(np.float64): np.complex128,
        np.dtype(float): complex,
}
# If we're given a complex type already, return it
dt = np.dtype(d)
if dt.kind == "c":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(dt, default))
def dtype_c2r(d, default=np.float32):
"""Find the real numpy dtype corresponding to a complex dtype.
This is used to maintain numerical precision and memory footprint
when constructing real arrays from complex-valued data
(e.g. in an inverse Fourier transform).
A `complex64` (single-precision) type maps to `float32`,
while a `complex128` (double-precision) maps to `float64`.
Parameters
----------
d : np.dtype
The complex-valued dtype to convert to real.
If ``d`` is a real (float) type already, it will be returned.
default : np.dtype, optional
The default real target type, if ``d`` does not match a
known dtype
Returns
-------
d_r : np.dtype
The real dtype
See Also
--------
dtype_r2c
numpy.dtype
Examples
--------
    >>> librosa.util.dtype_c2r(np.complex64)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.float32)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.int16)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.complex128)
    dtype('float64')
"""
mapping = {
np.dtype(np.complex64): np.float32,
np.dtype(np.complex128): np.float64,
        np.dtype(complex): float,
}
# If we're given a real type already, return it
dt = np.dtype(d)
if dt.kind == "f":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(np.dtype(d), default))
| isc |
YeoLab/anchor | anchor/simulate.py | 1 | 7366 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .visualize import violinplot, MODALITY_ORDER, MODALITY_TO_COLOR, barplot
def add_noise(data, iteration_per_noise=100,
noise_percentages=np.arange(0, 101, step=10), plot=True,
violinplot_kws=None, figure_prefix='anchor_simulation'):
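    """Return copies of ``data`` with increasing amounts of uniform noise.
    For each noise percentage and iteration, a copy of ``data`` is made, a
    subset of rows (selected according to ``noise_percentage``) is replaced
    with values drawn from Uniform(0, 1), the columns are renamed to
    ``<col>_noise<percentage>_iter<iteration>``, and all copies are
    concatenated column-wise into the returned DataFrame. If ``plot`` is
    True, a violinplot per noise level is also saved using ``figure_prefix``.
    """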
data_dfs = []
violinplot_kws = {} if violinplot_kws is None else violinplot_kws
width = len(data.columns) * 0.75
alpha = max(0.05, 1. / iteration_per_noise)
for noise_percentage in noise_percentages:
if plot:
fig, ax = plt.subplots(figsize=(width, 3))
for iteration in range(iteration_per_noise):
if iteration > 0 and noise_percentage == 0:
continue
noisy_data = data.copy()
            shape = (noisy_data.shape[0] * noise_percentage // 100,
                     noisy_data.shape[1])
size = np.product(shape)
noise_ind = np.random.choice(noisy_data.index,
size=noise_percentage,
replace=False)
noisy_data.loc[noise_ind] = np.random.uniform(
low=0., high=1., size=size).reshape(shape)
renamer = dict(
(col, '{}_noise{}_iter{}'.format(
col, noise_percentage, iteration))
for col in noisy_data.columns)
renamed = noisy_data.rename(columns=renamer)
data_dfs.append(renamed)
if plot:
noisy_data_tidy = noisy_data.unstack()
noisy_data_tidy = noisy_data_tidy.reset_index()
noisy_data_tidy = noisy_data_tidy.rename(
columns={'level_0': 'Feature ID',
'level_1': 'Sample ID',
0: '$\Psi$'})
violinplot(x='Feature ID', y='$\Psi$',
data=noisy_data_tidy, ax=ax,
**violinplot_kws)
if plot:
if noise_percentage > 0:
for c in ax.collections:
c.set_alpha(alpha)
ax.set(ylim=(0, 1), title='{}% Uniform Noise'.format(
noise_percentage), yticks=(0, 0.5, 1), ylabel='$\Psi$',
xlabel='')
plt.setp(ax.get_xticklabels(), rotation=90)
sns.despine()
fig.tight_layout()
fig.savefig('{}_noise_percentage_{}.pdf'.format(figure_prefix,
noise_percentage))
all_noisy_data = pd.concat(data_dfs, axis=1)
return all_noisy_data
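# Minimal usage sketch for ``add_noise`` (illustrative only; the DataFrame and
# column names below are assumptions, not part of this module):
#
#     import numpy as np
#     import pandas as pd
#     psi = pd.DataFrame(np.random.uniform(size=(100, 4)),
#                        columns=['event1', 'event2', 'event3', 'event4'])
#     noisy = add_noise(psi, iteration_per_noise=10,
#                       noise_percentages=np.arange(0, 101, step=25),
#                       plot=False)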
class ModalityEvaluator(object):
def __init__(self, estimator, data, waypoints, fitted, predicted):
self.estimator = estimator
self.data = data
self.predicted = predicted
self.fitted = fitted
self.waypoints = waypoints
def evaluate_estimator(estimator, data, waypoints=None, figure_prefix=''):
#
# estimator.violinplot(n=1e3)
# fig = plt.gcf()
# for ax in fig.axes:
# ax.set(yticks=[0, 0.5, 1], xlabel='')
# # xticklabels =
# # ax.set_xticklabels(fontsize=20)
# fig.tight_layout()
# sns.despine()
# fig.savefig('{}_modality_parameterization.pdf'.format(figure_prefix))
fitted = estimator.fit(data)
predicted = estimator.predict(fitted)
predicted.name = 'Predicted Modality'
fitted_tidy = fitted.stack().reset_index()
fitted_tidy = fitted_tidy.rename(
columns={'level_1': 'Feature ID', 'level_0': "Modality",
0: estimator.score_name}, copy=False)
predicted_tidy = predicted.to_frame().reset_index()
predicted_tidy = predicted_tidy.rename(columns={'index': 'Feature ID'})
predicted_tidy = predicted_tidy.merge(
fitted_tidy, left_on=['Feature ID', 'Predicted Modality'],
right_on=['Feature ID', 'Modality'])
# Make categorical so they are plotted in the correct order
predicted_tidy['Predicted Modality'] = \
pd.Categorical(predicted_tidy['Predicted Modality'],
categories=MODALITY_ORDER, ordered=True)
predicted_tidy['Modality'] = \
pd.Categorical(predicted_tidy['Modality'],
categories=MODALITY_ORDER, ordered=True)
grouped = data.groupby(predicted, axis=1)
size = 5
fig, axes = plt.subplots(figsize=(size*0.75, 8), nrows=len(grouped))
for ax, (modality, df) in zip(axes, grouped):
random_ids = np.random.choice(df.columns, replace=False, size=size)
random_df = df[random_ids]
tidy_random = random_df.stack().reset_index()
tidy_random = tidy_random.rename(columns={'level_0': 'sample_id',
'level_1': 'event_id',
0: '$\Psi$'})
sns.violinplot(x='event_id', y='$\Psi$', data=tidy_random,
color=MODALITY_TO_COLOR[modality], ax=ax,
inner=None, bw=0.2, scale='width')
ax.set(ylim=(0, 1), yticks=(0, 0.5, 1), xticks=[], xlabel='',
title=modality)
sns.despine()
fig.tight_layout()
fig.savefig('{}_random_estimated_modalities.pdf'.format(figure_prefix))
g = barplot(predicted_tidy, hue='Modality')
g.savefig('{}_modalities_barplot.pdf'.format(figure_prefix))
plot_best_worst_fits(predicted_tidy, data, modality_col='Modality',
score=estimator.score_name)
fig = plt.gcf()
fig.savefig('{}_best_worst_fit_violinplots.pdf'.format(figure_prefix))
fitted.to_csv('{}_fitted.csv'.format(figure_prefix))
predicted.to_csv('{}_predicted.csv'.format(figure_prefix))
result = ModalityEvaluator(estimator, data, waypoints, fitted, predicted)
return result
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
score='$\log_2 K$'):
"""Violinplots of the highest and lowest scoring of each modality"""
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout()
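# Minimal usage sketch for ``evaluate_estimator`` (illustrative only;
# ``estimator`` stands for any object exposing ``fit``, ``predict`` and
# ``score_name`` as assumed above, and ``psi_df`` is a samples-by-events
# DataFrame of percent-spliced-in values):
#
#     result = evaluate_estimator(estimator, psi_df, figure_prefix='demo')
#     print(result.predicted.value_counts())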
| bsd-3-clause |
spacetelescope/stsci.tools | doc/source/conf.py | 1 | 7012 | # -*- coding: utf-8 -*-
#
# stsci.tools documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 7 13:09:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from stsci.tools import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stsci.tools'
copyright = u'2020, STScI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_static_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = ['py-modindex']
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'stsci.toolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'stsci.tools.tex', u'stsci.tools Documentation',
# u'SSB', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/',
(None, 'http://data.astropy.org/intersphinx/matplotlib.inv')),
'astropy': ('https://docs.astropy.org/en/stable/', None)
}
| bsd-3-clause |
ngcurrier/ProteusCFD | GUI/dakotaHistogram.py | 1 | 1543 | #!/usr/bin/python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#reads a space delimited file with a header and returns a dictionary
#attempts to cast dictionary entries into floats, if it fails, leaves as is
def readSpaceDelimitedFile(filename):
f = open(filename, 'r')
headers = f.readline().split()
dict = {}
for header in headers:
dict[header] = []
for line in f:
items = line.split()
i = 0
for header in headers:
try:
dict[header].append(float(items[i]))
except:
dict[header].append(items[i])
i = i + 1
f.close()
return dict
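#example of the expected input layout (an illustrative sketch of a dakota
#tabular file; the variable and response names below are assumptions):
#
# %eval_id interface x1 x2 response_fn_1
# 1 NO_ID 0.25 1.75 3.0625
# 2 NO_ID 0.75 1.25 2.125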
#plots a histogram of data, computes basic stats, and labels chart
def plotHistogram(data, seriesName):
# the histogram of the data
    n, bins, patches = plt.hist(data, 50, density=True, facecolor='green', alpha=0.75)
mu = np.mean(data)
sigma = np.std(data)
# add a 'best fit' line
    y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel(seriesName)
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ ' + seriesName + ':}\ \mu=' + str(mu) +',\ \sigma=' + str(sigma) +'$')
plt.grid(True)
plt.show()
if __name__ == '__main__':
data = readSpaceDelimitedFile('dakota_tabular.dat')
    print(data)
for idata in data:
if idata != 'interface' and idata != '%eval_id':
plotHistogram(data[idata], idata)
| gpl-3.0 |
jmschrei/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 9 | 5718 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214:
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution with a standard deviation
of about 0.18ppm (noise level 0.0336). Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
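# Note: mldata.org (the source behind fetch_mldata) has been retired. On newer
# scikit-learn versions a similar raw dataset can be pulled from OpenML, e.g.
# (the dataset id is an assumption and the column layout differs, so the
# preprocessing above would need adjusting):
#
#     from sklearn.datasets import fetch_openml
#     co2 = fetch_openml(data_id=41187, as_frame=True)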
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
malvikasharan/APRICOT | apricotlib/apricot_visualization.py | 1 | 22211 | #!/usr/bin/env python
# Description = Visualizes different output data from APRICOT analysis
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
try:
import subprocess
except ImportError:
print('Python package subprocess is missing. Please install/update.\n')
sys.exit(0)
try:
import shutil
except ImportError:
print('Python package shutil is missing. Please install/update.\n')
sys.exit(0)
class VizApricotAnalysis(object):
def __init__(self, annotation_scoring_data,
domain_file,
additional_annotation,
outpath):
self._annotation_scoring_data = annotation_scoring_data
self._domain_file = domain_file
self._additional_annotation = additional_annotation
self._outpath = outpath
self._sec_str = self._outpath+'/secondary_structure'
self._dom_highlight = self._outpath+'/domain_highlighting'
self._pdb_msa = self._outpath+'/homologous_pdb_msa'
self._overview = self._outpath+'/overview_and_statistics'
self._localize = self._outpath+'/subcellular_localization'
self._annotation_data = []
self._filter_viz_dict = {}
self._highlight_dict = {}
self._uid_key_dict = {}
self._dom_rank = {}
self._fasta_dict = {}
self._secstr_dict = {}
self._dom_annotation = {}
self._location_dict = defaultdict(lambda: defaultdict(lambda: []))
self._sec_str_color = {'H': '#FF6666', 'E': '#33CCCC', 'C': '#FFFFCC'}
self._localization_dict = defaultdict(
lambda: defaultdict(lambda: float))
self._color_list = (
"Blue", "Green", "Teal", "Lime", "SeaGreen", "MediumTurquoise",
"Pink", "DarkOliveGreen", "Indigo", "Orange", "SlateBlue",
"LawnGreen", "Brown", "LightSkyBlue", "LightGreen", "DarkOrchid",
"GoldenRod", "MidnightBlue", "LightPink", "Gold")
def viz_all_the_visualization_files(self):
self.viz_domain_data()
self.domain_highlight()
self.viz_annotation_scoring()
self.viz_secondary_structure()
self.viz_subcellular_localization()
self.viz_homologous_pdb_msa()
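    # Minimal usage sketch (illustrative only; the file and directory paths
    # are assumptions, and ``outpath`` is expected to already contain the
    # subdirectories used above, e.g. ``domain_highlighting``):
    #
    #     viz = VizApricotAnalysis(
    #         'output/annotation_scoring.csv', 'output/selected_domains.csv',
    #         'output/additional_annotation', 'output/visualization')
    #     viz.viz_all_the_visualization_files()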
def viz_domain_data(self):
with open(self._domain_file, 'r') as in_fh:
for entry in in_fh:
if not entry.startswith('Entry'):
domain_info = DomainDataColumns(
entry.strip().split('\t'))
prot_name = domain_info.entry_name
prot_end = int(domain_info.length)-1
prot_key = '\n'.join(
["\tstart: 0,", "\tend: %s,"
% prot_end, '\tname: "%s",' % prot_name,
'\thref: "http://www.uniprot.org/uniprot/%s"'
% domain_info.uid])
self._uid_key_dict[domain_info.uid] = prot_key
self._location_dict[
domain_info.uid][domain_info.domain_id].append(
'\t{start: %s, end: %s}' % (
domain_info.start, domain_info.stop))
self._dom_annotation[
domain_info.domain_id] = domain_info.full_name
src = domain_info.resource
if src == 'CDD':
self._dom_rank.setdefault(
domain_info.uid+':CDD', []).append(
domain_info.domain_id)
self._highlight_dict.setdefault(
prot_key, []).append('\n'.join(
['\t\tstart: %s,' % domain_info.start,
'\t\tend: %s,' % domain_info.stop,
'\t\tdomain: {', '\t\t\tname: "%s",'
% domain_info.domain_id,
'\t\t\tid: %s,' % len(
self._dom_rank[domain_info.uid+':CDD']),
'\t\t\tdescription: "%s"},' %
domain_info.short_name,
'\t\tsource: {', '\t\t\tname: "CDD",',
'\t\t\thref: null,', '\t\t\tid: 1}']))
else:
self._dom_rank.setdefault(
domain_info.uid+':IPR', []).append(
domain_info.domain_id)
self._highlight_dict.setdefault(
prot_key, []).append('\n'.join(
['start: %s,' % domain_info.start,
'end: %s,' % domain_info.stop,
'domain: {', '\t\tname: "%s",' %
domain_info.domain_id,
'\t\tid: %s,' % len(
self._dom_rank[domain_info.uid+':IPR']),
'\t\tdescription: "%s"},' % domain_info.short_name,
'source: {', '\t\tname: "InterPro",',
'\t\thref: null,', '\t\tid: 2}']))
return self._uid_key_dict, self._location_dict, self._dom_annotation, self._dom_highlight, self._highlight_dict
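    # Illustrative sketch (not part of the original tool): for a hypothetical
    # UniProt entry "P12345" of length 70 with one CDD domain "ABC_x" spanning
    # residues 5-60, the structures filled above look roughly like
    #   self._uid_key_dict['P12345']          -> "\tstart: 0,\n\tend: 69,\n\tname: ...,\n\thref: ..."
    #   self._location_dict['P12345']['ABC_x'] -> ['\t{start: 5, end: 60}']
    #   self._dom_annotation['ABC_x']          -> full domain name
    # Each entry of self._highlight_dict becomes one highlightData record in the
    # BioJS protein viewer page written by domain_highlight() below.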
def domain_highlight(self):
for uid in self._uid_key_dict.keys():
header = '\n'.join(['<meta charset="UTF-8">'
'<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/biojs-vis-protein-viewer@0.1.4">',
'<script src="https://wzrd.in/bundle/biojs-vis-protein-viewer@0.1.4"></script>',
'<div id="j-main">', '</div>', '<script>',
'var ProteinViewer = require("biojs-vis-protein-viewer");'])
body = '\n'.join(['var highlightData = [', '\t{',
'\n\t},\n\t{\n'.join(self._highlight_dict[
self._uid_key_dict[uid]]), '\t}', '];'])
panel = '\n'.join(['var highlightLocusData = {',
self._uid_key_dict[uid], '};'])
footer = '\n'.join([
'var pv = new ProteinViewer({',
'\tel: document.getElementById("j-main"),',
'\tdata: highlightData,',
'\tlocusData: highlightLocusData', '});',
'pv.render();', '</script>'])
with open(self._dom_highlight+'/%s.html' % uid, 'w') as out_fh:
out_fh.write('\n'.join([header, body, panel, footer]))
def viz_annotation_scoring(self):
if os.path.exists(self._annotation_scoring_data):
with open(self._annotation_scoring_data, 'r') as in_fh:
for entry in in_fh:
if not entry.startswith('Entry'):
self._filter_viz_dict.setdefault('filter1_list', []).append(
float(entry.strip().split('\t')[-5]))
self._filter_viz_dict.setdefault('filter2_list', []).append(
float(entry.strip().split('\t')[-4]))
self._filter_viz_dict.setdefault('filter3_list', []).append(
float(entry.strip().split('\t')[-3]))
self._filter_viz_dict.setdefault('filter4_list', []).append(
float(entry.strip().split('\t')[-2]))
self._filter_viz_dict.setdefault('bayscore_list', []).append(
float(entry.strip().split('\t')[-1]))
try:
label_list = range(0, len(self._filter_viz_dict['bayscore_list']))
plt.plot(sorted(self._filter_viz_dict['filter1_list']), 'ro', label='Filter-1 Score')
plt.plot(sorted(self._filter_viz_dict['filter2_list']), 'ys', label='Filter-2 Score')
plt.plot(sorted(self._filter_viz_dict['filter3_list']), 'g8', label='Filter-3 Score')
plt.plot(sorted(self._filter_viz_dict['filter4_list']), 'mp', label='Filter-4 Score')
plt.plot(sorted(self._filter_viz_dict['bayscore_list']), 'b^', label='Bayesian Score')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.xticks(label_list)
plt.xlabel('Annotation scores of selected proteins')
plt.ylabel('Filter/Bayesian score')
plt.savefig(os.path.join(self._overview, 'viz_annotation_scoring.png'))
except KeyError:
print("!!! The annotation scoring file seems to be empty."
" Please reanalyse annotation score using the subcommand 'annoscore' !!!")
else:
            print('The data for annotation scores do not exist; '
                  'please calculate the annotation score using the subcommand '
                  '"annoscore". The flag "-nd" can be used to specify the absolute path for needle.')
def viz_secondary_structure(self):
for uid in self._uid_key_dict.keys():
if uid+'.horiz' in os.listdir(
self._additional_annotation+'/protein_secondary_structure/'):
files = uid+'.horiz'
elif uid+'.plain' in os.listdir(
self._additional_annotation+'/protein_secondary_structure/'):
files = uid+'.plain'
print("\nRaptorX secondary structure files are unavailable.")
print("Visualizing secondary structure using literature based analysis.\n")
else:
print("\nRaptorX/literature-based secondary structure files are unavailable.")
print("Exiting the current analysis.")
print("Please re-run the secondary structure prediction by RaptorX\n")
return
secstr_list = []
uid_secstr_dict = {}
sec_data_sites = []
with open(self._additional_annotation+
'/protein_secondary_structure/'+files, 'r') as in_fh:
for entry in in_fh:
if 'AA: ' in entry:
self._fasta_dict.setdefault(uid,
[]).append(entry.strip().split('AA: ')[1])
if 'Pred: ' in entry:
try:
secstr_list.append(entry.strip().split('Pred: ')[1])
except IndexError:
print("\nRaptorX output file is incomplete. Exiting the current analysis.")
print("Please re-run the secondary structure prediction by RaptorX\n")
return
for i, pred_data in enumerate(''.join(secstr_list)):
uid_secstr_dict[i] = pred_data
for j in range(len(uid_secstr_dict)-1):
if j == 0:
sec_data_sites.append(j)
if not uid_secstr_dict[j] == uid_secstr_dict[j+1]:
sec_data_sites.append(j+1)
self._secstr_dict.setdefault(uid, []).append(
'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
%(int(sec_data_sites[-2])+1, int(j)+1,
self._sec_str_color[uid_secstr_dict[j]]))
self._secstr_dict.setdefault(uid, []).append(
'mySequence.addHighlight({start:%s, end:%s, color:"Black", background:"%s"});'
%(int(sec_data_sites[-1])+1, int(list(uid_secstr_dict.keys())[-1])+1,
self._sec_str_color[uid_secstr_dict[j]]))
self.sec_str_script()
def sec_str_script(self):
for uid in self._fasta_dict.keys():
header = '\n'.join(['<meta charset="UTF-8">',
'<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>',
'<script src="https://wzrd.in/bundle/biojs-vis-sequence@0.1.7"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
'<div id="snippetDiv"></div>', '<script>',
'var yourDiv = document.getElementById("snippetDiv");',
'var Seq = require("biojs-vis-sequence");'])
footer = '\n'.join([
'mySequence.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
'mySequence.onAll(function(name,data){',
'console.log(arguments);', '});', '};',
'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
'</script>'])
body1 = '\n'.join(['var theSequence = "%s";' %
''.join(self._fasta_dict[uid]), 'yourDiv.textContent = "";',
'window.onload = function() {', 'var mySequence = new Seq({',
'\tsequence : theSequence,', '\ttarget : yourDiv.id,',
'\tformat : "CODATA",', '\tformatOptions : {',
'\ttitle:false,', '\tfooter:false', '\t},', '\tid : "%s"' % uid, '});'])
body2 = '\n'.join(self._secstr_dict[uid])
dom_list = sorted(list(self._location_dict[uid].keys()))
annotation_list = []
for dom_id in dom_list:
dom_idx = dom_list.index(dom_id)
annotation_list.append('\n'.join([
'mySequence.addAnnotation({', 'name:"Domain-%s",' % str(int(dom_idx)+1),
'html:"<br>%s<br>%s</b>",' % (dom_id,
self._dom_annotation[dom_id]), 'color:"%s",' % self._color_list[dom_idx],
'regions: [', ',\n'.join(self._location_dict[uid][dom_id]), ']});']))
with open(self._sec_str+'/'+uid+'.html', 'w') as out_fh:
out_fh.write('\n'.join([header, body1, '\n'.join(annotation_list),
body2, footer]))
def viz_subcellular_localization(self):
''''''
if 'psortb_data_summary.csv' in os.listdir(
self._additional_annotation+'/protein_localization'):
total_loc = set()
with open(
self._additional_annotation+'/protein_localization/psortb_data_summary.csv',
'r') as in_fh:
for entry in in_fh:
if not 'Localization' in entry:
protein = entry.strip().split('\t')[0]
localization = entry.strip().split('\t')[1]
if not localization.lower() == 'unknown':
score = float(entry.strip().split('\t')[2])
self._localization_dict[protein][localization] = score
total_loc.add(localization)
with open(self._localize+'/localization_table.csv', 'w') as out_fh:
out_fh.write('Proteins\t%s\n' % '\t'.join(sorted(list(total_loc))))
for each_prot in self._localization_dict.keys():
for localization in self._localization_dict[each_prot]:
entry_list = list('0'*len(total_loc))
loc_idx = sorted(list(total_loc)).index(localization)
entry_list[loc_idx] = self._localization_dict[each_prot][localization]
out_fh.write("%s\t%s\n" % (each_prot, '\t'.join(map(str, entry_list))))
self._create_localization_heatmap()
else:
print("\nPsortB-based localization prediction files are unavailable.")
print("Exiting the current analysis.")
print("Please re-run the localization prediction by PsortB\n")
return
def _create_localization_heatmap(self):
''''''
plot_file = self._localize+'/localization_heatmap.pdf'
infile = self._localize+'/localization_table.csv'
with open(self._localize+'/localization_heatmap.R', 'w') as r_fh:
r_fh.write('\n'.join(['library(gplots)', 'library(RColorBrewer)', 'display.brewer.all()',
'data <- read.csv("%s", header=T, sep = "\\t")' % infile,
'rnames <- data[,1]', 'data_matrix <- data.matrix(data[,2:ncol(data)])',
'data_matrix[is.na(data_matrix)] <- 0', 'data_matrix[is.nan(data_matrix)] <- 0',
'data_matrix[is.infinite(data_matrix)] <- max(data_matrix)',
'rownames(data_matrix) <- rnames', 'pdf(file="%s")' % plot_file,
'out_map <- heatmap.2(data_matrix, dendrogram = "none", Rowv = FALSE, \
Colv = FALSE, col=brewer.pal(9,"YlGn"), margins=c(5,8), \
cexCol=0.8, cexRow=0.8, key.title="PsortB Pred-value", key.xlab="", key.ylab="")',
'dev.off()']))
subprocess.Popen(['Rscript %s/localization_heatmap.R' %
self._localize], shell=True).wait()
def viz_homologous_pdb_msa(self):
header = '\n'.join(['<meta charset="UTF-8">',
'<link type="text/css" rel="stylesheet" href="http://parce.li/bundle/msa@0.4.8">',
'<script src="https://wzrd.in/bundle/msa@0.4.8"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-fasta@latest"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-clustal@latest"></script>',
'<script src="https://wzrd.in/bundle/biojs-io-gff@latest"></script>',
'<script src="https://wzrd.in/bundle/xhr@latest"></script>',
'<div id="snippetDiv"></div>', '<script>',
'var rootDiv = document.getElementById("snippetDiv");',
'var msa = require("msa");', 'var menuDiv = document.createElement("div");',
'var msaDiv = document.createElement("div");',
'rootDiv.appendChild(menuDiv);', 'rootDiv.appendChild(msaDiv);'])
footer = '\n'.join(['opts.conf = {', '\tdropImport: true,',
'\tmanualRendering: true', '};', 'opts.vis = {', '\tconserv: false,',
'\toverviewbox: false,', '\tseqlogo: true,', '\tmetacell: true', '};',
'opts.zoomer = {', '\tlabelIdLength: 20', '};', 'var m = msa(opts);',
'gg = m;', 'm.u.file.importURL(url, function() {',
'\tvar defMenu = new msa.menu.defaultmenu({', '\t\tel: menuDiv,',
'\t\tmsa: m', '\t});', '\tdefMenu.render();', '\tm.render();', '});',
'm.g.on("all",function(name,data){var obj = {name: name, data: data};if(inIframe()){ parent.postMessage(obj, "*") }})',
'function inIframe(){try{return window.self!==window.top}catch(e){return true}}',
'</script>'])
body = '//EDIT PATH\n'.join([
'var url = "https://github.com/malvikasharan/APRICOT/blob/master/Biojs_dependencies/data/biojs_msa_tab.clustal";'
'var opts = {', '\tel: msaDiv', '};'])
with open(self._pdb_msa+'/Biojs_pdb_msa_tab.html', 'w') as out_fh:
out_fh.write('\n'.join([header, body, footer]))
for files in os.listdir(self._additional_annotation+'/pdb_sequence_prediction/'):
if '_top5.fasta' in files:
shutil.copyfile(
self._additional_annotation+'/pdb_sequence_prediction/'+files,
self._pdb_msa+'/'+files)
subprocess.Popen(['bin/reference_db_files/clustal/clustalw2 %s' %
self._pdb_msa+'/'+files], shell=True).wait()
print("\nPlease open the BioJS MSA tab generated in Biojs_pdb_msa_tab.html.")
print("Import MSA files (.aln) in the BioJS MSA tab to visualize the alignment.\n")
class AnnotationScoringColumns(object):
'''Column information of annotation scoring file'''
def __init__(self, row):
self.uid = row[0]
self.entry_name = row[1]
self.prot_name = row[2]
self.species = row[3]
self.length = row[4]
self.resource = row[5]
self.resource_id = row[6]
self.domain_id = row[7]
self.short_name = row[8]
self.full_name = row[9]
self.domain_length = row[10]
self.start = row[11]
self.stop = row[12]
self.ref_seq = row[13]
self.q_seq = row[14]
self.ref_ss = row[15]
self.q_ss = row[16]
self.mol_mass = row[17]
self.iso_pt = row[18]
self.solub = row[19]
self.vdw = row[20]
self.coverage = row[21]
self.cov_by_dom = row[22]
self.identity = row[23]
self.iden_by_cov = row[24]
self.similarity = row[25]
self.sim_by_cov = row[26]
self.gap = row[27]
self.gap_by_cov = row[28]
self.AA_RO = row[29]
self.SS_RO = row[30]
self.PC_RO = row[31]
self.AAC_ED = row[32]
self.PCC_ED = row[33]
self.DPC_ED = row[34]
self.TPC_ED = row[35]
class DomainDataColumns(object):
'''Column information of domain annotation file'''
def __init__(self, row):
self.uid = row[0]
self.entry_name = row[1]
self.prot_name = row[2]
self.species = row[3]
self.length = row[4]
self.gene_name = row[5]
self.locus_tag = row[6]
self.existance = row[7]
self.go = row[8]
self.embl_id = row[9]
self.pdb_id = row[10]
self.kegg_id = row[11]
self.interpro_id = row[12]
self.pfam_id = row[13]
self.pubmed_id = row[14]
self.resource = row[15]
self.resource_id = row[16]
self.domain_id = row[17]
self.short_name = row[18]
self.full_name = row[19]
self.dom_kw = row[20]
self.dom_go = row[21]
self.members = row[22]
self.dom_len = row[23]
self.start = row[24]
self.stop = row[25]
self.evalue = row[26]
self.bitscore = row[27]
self.bits = row[28]
self.cover_len = row[29]
self.cov_prcnt = row[30]
self.identity = row[31]
self.iden_prcnt = row[32]
self.similarity = row[33]
self.sim_prcnt = row[34]
self.gaps = row[35]
self.gap_prcnt = row[36]
self.filter_tag = row[37]
| isc |
joernhees/scikit-learn | examples/linear_model/plot_sgd_iris.py | 58 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
jensreeder/scikit-bio | skbio/diversity/beta/__init__.py | 1 | 6898 | """
Beta diversity measures (:mod:`skbio.diversity.beta`)
=====================================================
.. currentmodule:: skbio.diversity.beta
This package contains helper functions for working with scipy's pairwise
distance (``pdist``) functions in scikit-bio, and will eventually be expanded
to contain pairwise distance/dissimilarity methods that are not implemented
(or planned to be implemented) in scipy.
The functions in this package currently support applying ``pdist`` functions
to all pairs of samples in a sample by observation count or abundance matrix
and returning an ``skbio.DistanceMatrix`` object. This application is
illustrated below for a few different forms of input.
Functions
---------
.. autosummary::
:toctree: generated/
pw_distances
pw_distances_from_table
Examples
--------
Create a table containing 7 OTUs and 6 samples:
.. plot::
:context:
>>> from skbio.diversity.beta import pw_distances
>>> import numpy as np
>>> data = [[23, 64, 14, 0, 0, 3, 1],
... [0, 3, 35, 42, 0, 12, 1],
... [0, 5, 5, 0, 40, 40, 0],
... [44, 35, 9, 0, 1, 0, 0],
... [0, 2, 8, 0, 35, 45, 1],
... [0, 0, 25, 35, 0, 19, 0]]
>>> ids = list('ABCDEF')
Compute Bray-Curtis distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> bc_dm = pw_distances(data, ids, "braycurtis")
>>> print(bc_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.78787879 0.86666667 0.30927835 0.85714286 0.81521739]
[ 0.78787879 0. 0.78142077 0.86813187 0.75 0.1627907 ]
[ 0.86666667 0.78142077 0. 0.87709497 0.09392265 0.71597633]
[ 0.30927835 0.86813187 0.87709497 0. 0.87777778 0.89285714]
[ 0.85714286 0.75 0.09392265 0.87777778 0. 0.68235294]
[ 0.81521739 0.1627907 0.71597633 0.89285714 0.68235294 0. ]]
Compute Jaccard distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> j_dm = pw_distances(data, ids, "jaccard")
>>> print(j_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.83333333 1. 1. 0.83333333 1. ]
[ 0.83333333 0. 1. 1. 0.83333333 1. ]
[ 1. 1. 0. 1. 1. 1. ]
[ 1. 1. 1. 0. 1. 1. ]
[ 0.83333333 0.83333333 1. 1. 0. 1. ]
[ 1. 1. 1. 1. 1. 0. ]]
Determine if the resulting distance matrices are significantly correlated
by computing the Mantel correlation between them. Then determine if the
p-value is significant based on an alpha of 0.05:
>>> from skbio.stats.distance import mantel
>>> r, p_value, n = mantel(j_dm, bc_dm)
>>> print(r)
-0.209362157621
>>> print(p_value < 0.05)
False
Compute PCoA for both distance matrices, and then find the Procrustes
M-squared value that results from comparing the coordinate matrices.
>>> from skbio.stats.ordination import PCoA
>>> bc_pc = PCoA(bc_dm).scores()
>>> j_pc = PCoA(j_dm).scores()
>>> from skbio.stats.spatial import procrustes
>>> print(procrustes(bc_pc.site, j_pc.site)[2])
0.466134984787
All of this only gets interesting in the context of sample metadata, so
let's define some:
>>> import pandas as pd
>>> try:
... # not necessary for normal use
... pd.set_option('show_dimensions', True)
... except KeyError:
... pass
>>> sample_md = {
... 'A': {'body_site': 'gut', 'subject': 's1'},
... 'B': {'body_site': 'skin', 'subject': 's1'},
... 'C': {'body_site': 'tongue', 'subject': 's1'},
... 'D': {'body_site': 'gut', 'subject': 's2'},
... 'E': {'body_site': 'tongue', 'subject': 's2'},
... 'F': {'body_site': 'skin', 'subject': 's2'}}
>>> sample_md = pd.DataFrame.from_dict(sample_md, orient='index')
>>> sample_md
subject body_site
A s1 gut
B s1 skin
C s1 tongue
D s2 gut
E s2 tongue
F s2 skin
<BLANKLINE>
[6 rows x 2 columns]
Now let's plot our PCoA results, coloring each sample by the subject it
was taken from:
>>> fig = bc_pc.plot(sample_md, 'subject',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by subject', cmap='jet', s=50)
.. plot::
:context:
We don't see any clustering/grouping of samples. If we were to instead color
the samples by the body site they were taken from, we see that the samples
form three separate groups:
>>> import matplotlib.pyplot as plt
>>> plt.close('all') # not necessary for normal use
>>> fig = bc_pc.plot(sample_md, 'body_site',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by body site', cmap='jet', s=50)
Ordination techniques, such as PCoA, are useful for exploratory analysis. The
next step is to quantify the strength of the grouping/clustering that we see in
ordination plots. There are many statistical methods available to accomplish
this; many operate on distance matrices. Let's use ANOSIM to quantify the
strength of the clustering we see in the ordination plots above, using our
Bray-Curtis distance matrix and sample metadata.
First test the grouping of samples by subject:
>>> from skbio.stats.distance import anosim
>>> results = anosim(bc_dm, sample_md, column='subject', permutations=999)
>>> results['test statistic']
-0.4074074074074075
>>> results['p-value'] < 0.1
False
The negative value of ANOSIM's R statistic indicates anti-clustering and the
p-value is insignificant at an alpha of 0.1.
Now let's test the grouping of samples by body site:
>>> results = anosim(bc_dm, sample_md, column='body_site', permutations=999)
>>> results['test statistic']
1.0
>>> results['p-value'] < 0.1
True
The R statistic of 1.0 indicates strong separation of samples based on body
site. The p-value is significant at an alpha of 0.1.
References
----------
.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._base import pw_distances, pw_distances_from_table
__all__ = ["pw_distances", "pw_distances_from_table"]
test = TestRunner(__file__).test
| bsd-3-clause |
formath/mxnet | docs/mxdoc.py | 11 | 12953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
# import StringIO from io for python3 compatibility
from io import StringIO
import contextlib
# white list to evaluate the code block output, such as ['tutorials/gluon']
_EVAL_WHILTELIST = []
# start or end of a code block
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the corresponding file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
    r_root = app.builder.srcdir + '/../R-package'
    # assumption: the repository root sits one level above the docs source dir
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
# scaldoc fails on some apis, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
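# Illustrative sketch (not part of the original plugin), assuming a minimal
# two-column markdown table:
#
#   table = ['| name | value |',
#            '| --- | --- |',
#            '| mxnet | 1.0 |']
#   rst = _convert_md_table_to_rst(table)
#
# rst is an ```eval_rst fenced ``.. list-table::`` block with one header row
# and one body row; an empty string is returned for tables with fewer than
# three lines, a ragged column count, or a malformed separator row.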
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted is '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block langunage
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
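# Illustrative sketch (not part of the original plugin): the fence lines
# themselves are yielded with in_code=True, so callers strip the first and
# last line of each code block.
#
#   lines = ['Intro', '```python', 'print(1)', '```', 'Outro']
#   [(l, c, g) for (l, c, g, _) in _parse_code_lines(lines)]
#   # -> [('Intro', False, None), ('```python', True, 'python'),
#   #     ('print(1)', True, 'python'), ('```', True, 'python'),
#   #     ('Outro', False, None)]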
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
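# Illustrative sketch (not part of the original plugin): _get_blocks() groups
# the tagged lines into alternating text/code chunks with the fences removed.
#
#   list(_get_blocks(['Intro', '```python', 'print(1)', '```', 'Outro']))
#   # -> [(False, None, ['Intro']),
#   #     (True, 'python', ['print(1)']),
#   #     (False, None, ['Outro'])]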
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
````
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
@contextlib.contextmanager
def _string_io():
oldout = sys.stdout
olderr = sys.stderr
    strio = StringIO()
sys.stdout = strio
sys.stderr = strio
yield strio
sys.stdout = oldout
sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
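# Illustrative sketch (not part of the original plugin): the evaluator shares
# one globals dict across calls, so later notebook cells can reuse earlier
# definitions, and returns the captured stdout/stderr with a success flag.
#
#   g = {}
#   _get_python_block_output("x = 2\nprint(x * 3)", g, {})   # -> (True, '6\n')
#   _get_python_block_output("print(y)", g, {})              # -> (False, "name 'y' is not defined")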
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, blk_lang, lines in _get_blocks(lines):
if blk_lang != lang:
in_code = False
src = '\n'.join(lines)
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": src
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
    # _get_blocks() takes only the lines and yields (in_code, lang, lines),
    # so keep a block as code only when it matches the requested language
    for blk_in_code, blk_lang, blk_lines in _get_blocks(lines):
        in_code = blk_in_code and blk_lang == lang
        if in_code:
            out.append('')
        for l in blk_lines:
            if in_code:
                if '%matplotlib' not in l:
                    out.append(l)
            else:
                if ('<div>' in l or '</div>' in l or
                    '<script>' in l or '</script>' in l or
                    '<!--' in l or '-->' in l or
                    '%matplotlib' in l ):
                    continue
                out.append(cmt+l)
        if in_code:
            out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix
if lang == 'python':
ipynb += '.ipynb'
else:
ipynb += '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
f = ipynb.split('/')[-1]
btn += '<div class="download-btn"><a href="%s" download="%s">' \
'<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
local_dict = {}
global_dict = {}
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# # then add lang buttons
# for k,l in enumerate(lines):
# if _LANG_SELECTION_MARK in l:
# lines[k] = _get_lang_selection_btn(langs)
output = ''
for in_code, lang, lines in _get_blocks(lines):
src = '\n'.join(lines)+'\n'
if in_code:
output += _get_mk_code_block(src, lang)
if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
status, blk_out = _get_python_block_output(src, global_dict, local_dict)
if len(blk_out):
output += '<div class=\"cell-results-header\">Output:</div>\n\n'
output += _get_mk_code_block(blk_out, 'results')
else:
output += src
source[i] = output
# source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
# skipped to build r, it requires to install latex, which is kinds of too heavy
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
| apache-2.0 |
mbonsma/studyGroup | lessons/python/matplotlib/hwk3.1.py | 12 | 2149 | # -*- coding: utf-8 -*-
from numpy import float32
from numpy import linspace
from numpy import polyfit
from numpy import polyval
import matplotlib.pyplot as plt
#Read in data from csv
f=open('data.csv','r')
line=f.readlines()
#Empty array for data
FN=[]
EFN=[]
#This loop goes through every line, strips new line character and then splits the data on ,. It will then save data into the arrays
for l in line:
a=l.strip()
x,y=a.split(",")
FN.append(float32(x))
EFN.append(float32(y))
f.close()
#Generate linear space but this was not used as of yet
z=linspace(-1,4)
#Create grid and plot data
fig = plt.figure(figsize = (4,4), dpi = 600)
a = fig.add_subplot(1,1,1)
plt.plot(FN,EFN,'ks',markersize=3)
#Created a fitted line for the data
fit=polyfit(FN,EFN,1)
plt.plot(z,polyval(fit,z),label=fit,color='k')
#Reset font size
for t in a.yaxis.get_major_ticks():
t.label.set_fontsize(6)
for t in a.xaxis.get_major_ticks():
t.label.set_fontsize(6)
#Set the subplot sizing
fig.subplots_adjust(top=0.95, right =0.89, left=0.13,bottom=0.25)
#Set limits and labels
plt.xlim(-0.2,3.5)
plt.ylim(0,0.8)
plt.ylabel(r'Extrafloral Nectar (mg of sugar per extrafloral nectary)',fontsize=6,verticalalignment='center')
plt.xlabel(r'Floral Nectar (mg of sugar per flower)',fontsize=6,horizontalalignment='center')
#Save as pdf
fig.savefig('EFNvFN.pdf',dpi=600)
plt.show()
"""In ecology, animals and plants interact with one another in an ecosystem.
There are several types of interactions that may occur such as predation,
parasitism and mutualism. Mutualism is where the animals and plants both give
one another a survival benefit. So if a trait is not useful, why invest energy
into producing it?
Different interactions have generally been studied individually even though
they occur in a community. This plot shows the relationship between EFN and FN
production in T. ulmifolia. There is a positive correlation, which suggests that
plants that produce more of one also produce more of the other.
This is probably because of overall plant vigour. This was an initial figure
for a later experiment showing interactions."""
| apache-2.0 |
mandli/multilayer-examples | 1d/setplot_shelf.py | 1 | 12827 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
# Plot customization
import matplotlib
# Markers and line widths
matplotlib.rcParams['lines.linewidth'] = 2.0
matplotlib.rcParams['lines.markersize'] = 6
matplotlib.rcParams['lines.markersize'] = 8
# Font Sizes
matplotlib.rcParams['font.size'] = 16
matplotlib.rcParams['axes.labelsize'] = 15
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
# DPI of output images
matplotlib.rcParams['savefig.dpi'] = 300
# Need to do this after the above
import matplotlib.pyplot as mpl
from clawpack.pyclaw.solution import Solution
from multilayer.aux import bathy_index,kappa_index,wind_index
import multilayer.plot as plot
# matplotlib.rcParams['figure.figsize'] = [6.0,10.0]
def setplot(plotdata,eta=[0.0,-300.0],rho=[1025.0,1045.0],g=9.81,dry_tolerance=1e-3,bathy_ref_lines=[-30e3]):
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
# Fetch bathymetry once
b = Solution(0,path=plotdata.outdir,read_aux=True).state.aux[bathy_index,:]
# ========================================================================
# Plot variable functions
def bathy(cd):
return b
def kappa(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[kappa_index,:]
def wind(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[wind_index,:]
def h_1(cd):
return cd.q[0,:] / rho[0]
def h_2(cd):
return cd.q[2,:] / rho[1]
def eta_2(cd):
return h_2(cd) + bathy(cd)
def eta_1(cd):
return h_1(cd) + eta_2(cd)
def u_1(cd):
index = np.nonzero(h_1(cd) > dry_tolerance)
u_1 = np.zeros(h_1(cd).shape)
u_1[index] = cd.q[1,index] / cd.q[0,index]
return u_1
def u_2(cd):
index = np.nonzero(h_2(cd) > dry_tolerance)
u_2 = np.zeros(h_2(cd).shape)
u_2[index] = cd.q[3,index] / cd.q[2,index]
return u_2
def hu_1(cd):
index = np.nonzero(h_1(cd) > dry_tolerance)
hu_1 = np.zeros(h_1(cd).shape)
hu_1[index] = cd.q[1,index] / rho[0]
return hu_1
def hu_2(cd):
index = np.nonzero(h_2(cd) > dry_tolerance)
hu_2 = np.zeros(h_2(cd).shape)
hu_2[index] = cd.q[3,index] / rho[1]
return hu_2
# ========================================================================
# Labels
def add_bathy_dashes(current_data):
for ref_line in bathy_ref_lines:
mpl.plot([ref_line,ref_line],[-10,10],'k--')
def add_horizontal_dashes(current_data):
mpl.plot([-400e3,0.0],[0.0,0.0],'k--')
def km_labels(current_data):
r"""Flips xaxis and labels with km"""
mpl.xlabel('km')
locs,labels = mpl.xticks()
labels = np.flipud(locs)/1.e3
mpl.xticks(locs,labels)
def time_labels(current_data):
r"""Convert time to hours"""
pass
# ========================================================================
# Limit Settings
xlimits = [-400e3,0.0]
ylimits_depth = [-4000.0,100.0]
xlimits_zoomed = [-30e3-1e3,-30e3+1e3]
ylimits_surface_zoomed = [eta[0] - 0.5,eta[0] + 0.5]
ylimits_internal_zoomed = [eta[1] - 2.5,eta[1] + 2.5]
ylimits_momentum = [-40,10]
# ylimits_velocities = [-1.0,1.0]
ylimits_velocities = [-0.04,0.04]
ylimits_kappa = [0.0,1.2]
# Create data object
plotdata.clearfigures() # clear any old figures,axes,items data
# ========================================================================
# Function for doing depth drawing
# ========================================================================
def fill_items(plotaxes):
# Top layer
plotitem = plotaxes.new_plotitem(plot_type='1d_fill_between')
plotitem.plot_var = eta_1
plotitem.plot_var2 = eta_2
plotitem.color = plot.top_color
plotitem.plotstyle = plot.surface_linestyle
plotitem.show = True
# Bottom Layer
plotitem = plotaxes.new_plotitem(plot_type='1d_fill_between')
plotitem.plot_var = eta_2
plotitem.plot_var2 = bathy
plotitem.color = plot.bottom_color
plotitem.plotstyle = plot.internal_linestyle
plotitem.show = True
# Plot bathy
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = bathy
plotitem.plotstyle = plot.bathy_linestyle
plotitem.show = True
# Plot line in between layers
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = eta_2
plotitem.color = 'k'
plotitem.plotstyle = plot.internal_linestyle
plotitem.show = True
# Plot line on top layer
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = eta_1
plotitem.color = 'k'
plotitem.plotstyle = plot.surface_linestyle
plotitem.show = True
# ========================================================================
# Full Depths
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Full Depths',figno=102)
plotfigure.show = True
def bathy_axes(cd):
km_labels(cd)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Full Depths'
plotaxes.xlimits = xlimits
plotaxes.ylimits = [-4100,100]
plotaxes.afteraxes = bathy_axes
fill_items(plotaxes)
# ========================================================================
# Momentum
# ========================================================================
plotfigure = plotdata.new_plotfigure(name="momentum")
plotfigure.show = True
def momentum_axes(cd):
km_labels(cd)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
mpl.title("Layer Momenta at t = %4.1f s" % cd.t)
mpl.legend(['Top Layer Momentum','Bottom Layer Momentum'],loc=4)
def inset_momentum_axes(cd):
# TODO: This plot does not refresh correctly, skip the inset
fig = mpl.figure(cd.plotfigure.figno)
axes = fig.add_subplot(111)
# Plot main figure
axes.plot(cd.x, hu_1(cd), 'b-')
axes.plot(cd.x, hu_2(cd), 'k--')
axes.set_xlim(xlimits)
axes.set_ylim(ylimits_momentum)
momentum_axes(cd)
# Create inset plot
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
inset_axes = zoomed_inset_axes(axes, 0.5, loc=3)
inset_axes.plot(cd.x, hu_1(cd), 'b-')
inset_axes.plot(cd.x, hu_2(cd), 'k--')
inset_axes.set_xticklabels([])
inset_axes.set_yticklabels([])
x_zoom = [-120e3,-30e3]
y_zoom = [-10,10]
inset_axes.set_xlim(x_zoom)
inset_axes.set_ylim(y_zoom)
mark_inset(axes, inset_axes, loc1=2, loc2=4, fc='none', ec="0.5")
# mpl.ion()
mpl.draw()
# mpl.show()
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Momentum"
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_momentum
# plotaxes.afteraxes = inset_momentum_axes
# Top layer
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = hu_1
plotitem.plotstyle = 'b-'
plotitem.show = True
# Bottom layer
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = hu_2
plotitem.plotstyle = 'k--'
plotitem.show = True
# ========================================================================
# Velocities with Kappa
# ========================================================================
include_kappa = False
if include_kappa:
plotfigure = plotdata.new_plotfigure(name='Velocity and Kappa',figno=14)
else:
plotfigure = plotdata.new_plotfigure(name='Velocities',figno=14)
plotfigure.show = True
# plotfigure.kwargs = {'figsize':(7,6)}
def twin_axes(cd):
fig = mpl.gcf()
fig.clf()
# Get x coordinate values
x = cd.patch.dimensions[0].centers
# Draw velocity and kappa plot
vel_axes = fig.add_subplot(111) # the velocity scale
# kappa_axes = vel_axes.twinx() # the kappa scale
# Bottom layer velocity
bottom_layer = vel_axes.plot(x,u_2(cd),'k-',label="Bottom Layer Velocity")
# Top Layer velocity
top_layer = vel_axes.plot(x,u_1(cd),'b--',label="Top Layer velocity")
if include_kappa:
# Kappa
kappa_line = kappa_axes.plot(x,kappa(cd),'r-.',label="Kappa")
kappa_axes.plot(x,np.ones(x.shape),'r:')
vel_axes.set_xlabel('km')
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
for ref_line in bathy_ref_lines:
vel_axes.plot([ref_line,ref_line],ylimits_velocities,'k:')
if include_kappa:
vel_axes.set_title("Layer Velocities and Kappa at t = %4.1f s" % cd.t)
else:
vel_axes.set_title("Layer Velocities at t = %4.1f s" % cd.t)
vel_axes.set_ylabel('Velocities (m/s)')
vel_axes.set_xlim(xlimits)
vel_axes.set_ylim(ylimits_velocities)
if include_kappa:
plot.add_legend(vel_axes,'Kappa',location=3,color='r',linestyle='-.')
kappa_axes.set_ylabel('Kappa')
kappa_axes.set_ylim(ylimits_kappa)
else:
vel_axes.legend(loc=3)
try:
mpl.subplots_adjust(hspace=0.1)
except:
pass
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = twin_axes
# ========================================================================
# Combined Top and Internal Surface
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Zoomed Depths',figno=13)
plotfigure.show = True
plotfigure.kwargs = {'figsize':(6,6)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(2,1,1)'
plotaxes.title = 'Surfaces'
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_surface_zoomed
def top_afteraxes(cd):
mpl.xlabel('')
locs,labels = mpl.xticks()
# labels = np.flipud(locs)/1.e3
labels = ['' for i in range(len(locs))]
mpl.xticks(locs,labels)
add_bathy_dashes(cd)
mpl.ylabel('m')
mpl.title("Surfaces t = %4.1f s" % cd.t)
plotaxes.afteraxes = top_afteraxes
plotaxes = fill_items(plotaxes)
# Internal surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(2,1,2)'
plotaxes.title = ''
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_internal_zoomed
def internal_surf_afteraxes(cd):
km_labels(cd)
mpl.title('')
mpl.ylabel('m')
mpl.subplots_adjust(hspace=0.05)
mpl.xticks([-300e3,-200e3,-100e3,-30e3],[300,200,100,30],fontsize=15)
mpl.xlabel('km')
plotaxes.afteraxes = internal_surf_afteraxes
plotaxes = fill_items(plotaxes)
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
# plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_framenos = [0,30,100,200,300]
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
| mit |
camptocamp/QGIS | python/plugins/processing/algs/VectorLayerHistogram.py | 1 | 2809 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import *
from qgis.core import *
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterTableField import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
from processing.tools import dataobjects
from processing.parameters.ParameterNumber import ParameterNumber
class VectorLayerHistogram(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
FIELD = "FIELD"
BINS = "BINS"
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(uri)
fieldname = self.getParameterValue(self.FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, fieldname)
plt.close()
bins = self.getParameterValue(self.BINS)
plt.hist(values[fieldname], bins)
plotFilename = output +".png"
lab.savefig(plotFilename)
f = open(output, "w")
f.write("<img src=\"" + plotFilename + "\"/>")
f.close()
def defineCharacteristics(self):
self.name = "Vector layer histogram"
self.group = "Graphics"
self.addParameter(ParameterVector(self.INPUT, "Input layer", [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.FIELD, "Attribute", self.INPUT,ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterNumber(self.BINS, "number of bins", 2, None, 10))
self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
    # This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
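# Illustrative sketch (not part of the original example): create_hc() returns a
# list of node lists that can be fed straight into nx.blockmodel(), e.g.
#
#   G = nx.path_graph(6)
#   parts = create_hc(G)   # e.g. [[0, 1, 2], [3, 4, 5]], depending on the cut
#   BM = nx.blockmodel(G, parts)
#
# The fcluster threshold (t=1.15) controls how coarse the resulting blocks are.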
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H=nx.connected_component_subgraphs(G)[0]
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| mit |
ggirelli/gpseq-img-py | pygpseq/anim/series.py | 1 | 12252 | # -*- coding: utf-8 -*-
'''
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
@description: contains Series wrapper, which in turn contains Nucleus.
'''
# DEPENDENCIES =================================================================
import math
import os
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
from pygpseq import const
from pygpseq.tools.binarize import Binarize
from pygpseq.tools import io as iot
from pygpseq.tools import image as imt
from pygpseq.tools import plot
from pygpseq.tools import stat as stt
from pygpseq.tools import string as st
from pygpseq.tools import vector as vt
from pygpseq.anim.nucleus import Nucleus
# CLASSES ======================================================================
class Series(iot.IOinterface):
"""Series (Field of View, i.e., two-channel image) wrapper.
Attributes:
__version__ (string): package version.
n (int): series id (1-indexed).
name (string): series name.
nuclei (list[pygpseq.wraps.Nuclei]): nuclei list.
basedir (string): series folder path.
dna_bg (float): estimated dna channel background.
sig_bg (float): estimated signal channel background.
        filist (list): series file info.
"""
__version__ = const.VERSION
n = 0
name = ''
nuclei = []
basedir = '.'
dna_bg = None
sig_bg = None
filist = []
def __init__(self, ds, condition = None, **kwargs):
"""Run IOinterface __init__ method.
Args:
ds (dict): series information list.
condition (pyGPSeq.wraps.Condition): condition wrapper (opt).
"""
# If required, inherit from `condition` wrap
if None != condition:
logpath = condition.logpath
super(Series, self).__init__(path = logpath, append = True)
self.basedir = condition.path
else:
super(Series, self).__init__()
# Save input parameters
self.name = ds[0]
self.filist = ds[1]
self.n = ds[2]
def __getitem__(self, key):
""" Allow get item. """
if key in dir(self):
return(getattr(self, key))
else:
return(None)
def __setitem__(self, key, value):
""" Allow set item. """
if key in dir(self):
self.__setattr__(key, value)
def adjust_options(self, read_only_dna = None, log = None, **kwargs):
"""Adjust options to be passed to the Nucleus class.
Args:
dna_names (tuple[string]): dna channel names.
sig_names (tuple[string]): signal channel names.
an_type (pyGPSeq.const): analysis type.
Returns:
dict: adds the following kwargs:
series_name (string): series wrap name.
basedir (string): series wrap base directory.
dna_ch (numpy.array): image (dimensionality based on an_type).
sig_ch (numpy.array): image (dimensionality based on an_type).
"""
# Start log
if None == log: log = ''
# Only work on dna channel
if None == read_only_dna:
read_only_dna = False
# Add necessary options
kwargs['series_name'] = self.name
kwargs['basedir'] = self.basedir
# Read DNA channel
kwargs['dna_ch'], log = self.get_channel(kwargs['dna_names'],
log, **kwargs)
if not read_only_dna:
kwargs['sig_ch'], log = self.get_channel(kwargs['sig_names'],
log, **kwargs)
# Output
return((kwargs, log))
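    # Illustrative sketch (not part of the original package), with hypothetical
    # channel names: downstream methods call adjust_options() once per series
    # and then read the injected images, e.g.
    #   kwargs, log = series.adjust_options(dna_names=('dapi',),
    #                                       sig_names=('cy5',), **other_opts)
    #   dna = kwargs['dna_ch']   # 2D or 3D numpy array, depending on an_type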
def export_nuclei(self, **kwargs):
"""Export current series nuclei. """
# Set output suffix
if not 'suffix' in kwargs.keys():
suffix = ''
else:
suffix = st.add_leading_dot(kwargs['suffix'])
# Add necessary options
self.printout('Current series: "' + self.name + '"...', 1)
kwargs, log = self.adjust_options(**kwargs)
# Export nuclei
[n.export(**kwargs) for n in self.nuclei]
# Produce log
log = np.zeros(len(self.nuclei), dtype = const.DTYPE_NUCLEAR_SUMMARY)
        for i, l in enumerate([n.get_summary(**kwargs) for n in self.nuclei]):
            # Append nuclear data to the series log
            summary = [self.n]
            summary.extend(l)
            log[i] = tuple(summary)
# Export series log
np.savetxt(kwargs['out_dir'] + self.name + '.summary' + suffix + '.csv',
log, delimiter = ',', comments = '',
header = ",".join([h for h in log.dtype.names]))
return(log)
def find_channel(self, channel_names):
"""Return the first channel to correspond to channel_names. """
# Fix the param type
if type(str()) == type(channel_names):
channel_names = [channel_names]
# Cycle through the available channels
for cname in channel_names:
# Identify the requested channel
idx = self.find_channel_id(cname)
# Return the channel
if -1 != idx:
return([i for i in self.filist.items()][idx])
# Return empty dictionary if no matching channel is found
return({})
def find_channel_id(self, channel_name):
"""Return the id of the channel file with the specified name. """
# Retrieve available channel names
names = self.get_channel_names()
if 0 != names.count(channel_name):
# Return matching channel id
return(names.index(channel_name))
else:
# Return -1 if no matching channel is found
return(-1)
def find_nuclei(self, **kwargs):
"""Segment current series.
Args:
**kwargs
dna_names (tuple[string]): dna channel names.
cond_name (string): condition wrapper name.
seg_type (pyGPSeq.const): segmentation type.
rm_z_tips (bool): remove nuclei touching the tips of the stack.
radius_interval (tuple[float]): allowed nuclear radius interval.
offset (tuple[int]): dimensions box/square offset.
aspect (tuple[float]): pixel/voxel dimension proportion.
Returns:
tuple: series current instance and log string.
"""
# Set output suffix
if not 'suffix' in kwargs.keys():
suffix = ''
else:
suffix = st.add_leading_dot(kwargs['suffix'])
# Check plotting
if not 'plotting' in kwargs.keys():
kwargs['plotting'] = True
log = ""
log += self.printout('Current series: "' + self.name + '"...', 1)
# Read images
kwargs, alog = self.adjust_options(read_only_dna = False, **kwargs)
log += alog
# Extract from kwargs
seg_type = kwargs['seg_type']
dna_ch = kwargs['dna_ch']
sig_ch = kwargs['sig_ch']
# Make new channel copy
i = dna_ch.copy()
# Produce a mask
bi = Binarize(path = kwargs['logpath'], append = True, **kwargs)
bi.verbose = self.verbose
mask, thr, tmp_log = bi.run(i)
log += tmp_log
# Estimate background
        if self.dna_bg is None:
            self.dna_bg = imt.estimate_background(dna_ch, mask, seg_type)
        kwargs['dna_bg'] = self.dna_bg
        if self.sig_bg is None:
self.sig_bg = imt.estimate_background(sig_ch, mask, seg_type)
kwargs['sig_bg'] = self.sig_bg
log += self.printout('Estimating background:', 2)
log += self.printout('DNA channel: ' + str(kwargs['dna_bg']), 3)
log += self.printout('Signal channel: ' + str(kwargs['sig_bg']), 3)
# Filter object size
mask, tmp_log = bi.filter_obj_XY_size(mask)
log += tmp_log
mask, tmp_log = bi.filter_obj_Z_size(mask)
log += tmp_log
# Save mask
log += self.printout('Saving series object mask...', 2)
L = label(mask)
# Plot
fig = plt.figure()
if 3 == len(mask.shape):
plt.imshow(L.max(0).astype('u4'))
else:
plt.imshow(L.astype('u4'))
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
plot.set_font_size(kwargs['font_size'])
title = 'Nuclei in "' + kwargs['cond_name'] + '", ' + str(self.name)
title += ' [' + str(L.max()) + ' objects]'
plt.title(title)
# Export as png
fname = kwargs['out_dir'] + const.OUTDIR_MASK + kwargs['cond_name']
fname += '.' + self.name + '.mask' + suffix + '.png'
if kwargs['plotting']: plot.export(fname, 'png')
# Close plot figure
plt.close(fig)
# Initialize nuclei
log += self.printout('Bounding ' + str(L.max()) + ' nuclei...', 2)
kwargs['logpath'] = self.logpath
kwargs['i'] = i
kwargs['thr'] = thr
kwargs['series_id'] = self.n
seq = range(1, L.max() + 1)
self.nuclei = [Nucleus(n = n, mask = L == n, **kwargs) for n in seq]
return((self, log))
def get_c(self):
"""Return number of channels in the series. """
return(len(self.filist))
def get_channel(self, ch_name, log = None, **kwargs):
"""Read the series specified channel.
Args:
ch_name (string): channel name.
log (string): log string.
**kwargs
Returns:
tuple: channel image and log string.
"""
# Start log (used when verbosity is off)
        if log is None: log = ""
log += self.printout('Reading channel "' + str(ch_name) + '"...', 2)
# Read channel
f = self.find_channel(ch_name)
imch = imt.read_tiff(os.path.join(self.basedir, f[0]))
imch = imt.slice_k_d_img(imch, 3)
# Deconvolved images correction
if 'rescale_deconvolved' in kwargs.keys():
if kwargs['rescale_deconvolved']:
# Get DNA scaling factor and rescale
sf = imt.get_rescaling_factor(f, **kwargs)
imch = (imch / sf).astype('float')
msg = 'Rescaling "' + f[0] + '" [' + str(sf) + ']...'
log += self.printout(msg, 3)
# Make Z-projection
if kwargs['an_type'] in [const.AN_SUM_PROJ, const.AN_MAX_PROJ]:
msg = 'Generating Z-projection [' + str(kwargs['an_type']) + ']...'
log += self.printout(msg, 3)
if 2 != len(imch.shape):
imch = imt.mk_z_projection(imch, kwargs['an_type'])
# Prepare output
return((imch, log))
def get_channel_names(self, channel_field = None):
"""Return the names of the channels in the series. """
        if channel_field is None:
channel_field = const.REG_CHANNEL_NAME
return([c[channel_field] for c in self.filist.values()])
def get_nuclei_data(self, nuclei_ids, **kwargs):
"""Retrieve a single nucleus from the current series. """
# Read channel images
kwargs, log = self.adjust_options(**kwargs)
# Re-build mask
bi = Binarize(path = self.logpath, append = True, **kwargs)
bi.verbose = self.verbose
mask, thr, tmp_log = bi.run(kwargs['dna_ch'].copy())
log += tmp_log
# Empty nuclear data array
data = []
for nucleus_id in nuclei_ids:
# Select nucleus
n = self.nuclei[nucleus_id -1]
# Setup nucleus instance verbosity
if not self.verbose:
n.verbose = False
# Retrieve nuclear data
ndata, nlog = n.get_data(mask = mask, **kwargs)
# Update log and save nuclear data
log += nlog
data.append(ndata)
return((data, log))
def propagate_attr(self, key):
"""Propagate attribute current value to every nucleus. """
for i in range(len(self.nuclei)):
self.nuclei[i][key] = self[key]
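# Hedged usage sketch (added for illustration, not part of the original
# module): the exact keyword arguments expected in **kwargs are assumptions
# inferred from the docstrings above (dna_names, seg_type, out_dir, ...).
def _example_series_run(series, **kwargs):
    """Segment one series and export its nuclei, returning the summary log."""
    series, seg_log = series.find_nuclei(**kwargs)
    summary = series.export_nuclei(**kwargs)
    return series, summary, seg_log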
# END ==========================================================================
################################################################################
| mit |
raincoatrun/ThinkStats2 | code/thinkplot.py | 75 | 18140 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
        # Note: under PEP 479 (Python 3.7+) raising StopIteration inside a
        # generator becomes a RuntimeError, so simply end the generator here.
        return
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
if cls.color_iter is None:
cls.InitializeIter(7)
return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
_Brewer.InitializeIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (14, 6),
(1, 3): (14, 6),
(2, 2): (10, 10),
(2, 3): (16, 10),
(3, 1): (8, 10),
}
if (rows, cols) in size_map:
fig = pyplot.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
pyplot.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(plot_number, rows=None, cols=None):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
pyplot.subplot(rows, cols, plot_number)
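# Added example (not in the original module): a minimal sketch of the
# PrePlot/SubPlot workflow for laying out panels before plotting.
def _example_subplots():
    """Draw two panels side by side using the module-level helpers."""
    PrePlot(rows=1, cols=2)      # resize the figure and select panel 1
    Plot([0, 1, 2], [0, 1, 4], label='rising')
    SubPlot(2)                   # switch to panel 2
    Plot([0, 1, 2], [4, 1, 0], label='falling')
    Show(legend=True)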
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
global LOC
LOC = None
_Brewer.ClearIter()
pyplot.clf()
fig = pyplot.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
_Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def _UnderrideColor(options):
if 'color' in options:
return options
color_iter = _Brewer.GetIter()
if color_iter:
try:
options['color'] = next(color_iter)
except StopIteration:
# TODO: reconsider whether this should warn
# warnings.warn('Warning: Brewer ran out of colors.')
_Brewer.ClearIter()
return options
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.8, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Plots a line.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
warnings.warn("Hist: Can't compute bar width automatically."
"Check for non-numeric types in Hist."
"Or try providing width option."
)
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
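# Added example (not in the original module): how the automatic bar width
# interacts with an explicit override; `hist` is assumed to be a thinkstats2
# Hist/Pmf-like object exposing Render() and a label attribute.
def _example_hist_width(hist):
    """Plot the same histogram with the default and an explicit bar width."""
    Hist(hist)             # width defaults to 0.9 * smallest gap between values
    Hist(hist, width=0.4)  # explicit width, in the same units as the values
    Show()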
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
warnings.warn("Pmf: Can't compute bar width automatically."
"Check for non-numeric types in Pmf."
"Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
        xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
options = _Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
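# Added example (not in the original module): the scale dict returned by Cdf
# is meant to be forwarded to Config/Show/Save so the axes match the chosen
# transform; `cdf` is assumed to be a thinkstats2 Cdf.
def _example_cdf_transform(cdf):
    """Plot the complementary CDF on a log y-axis to look for an exponential tail."""
    scale = Cdf(cdf, transform='exponential')
    Show(**scale)   # forwards the xscale/yscale selected by the transform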
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
    obj: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
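    # Evaluate the sparse (x, y) -> z mapping on the full mesh, defaulting to
    # zero wherever a grid point has no entry in the dictionary.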
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
# looks like this is not necessary: matplotlib understands text loc specs
loc_dict = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
global LEGEND
LEGEND = options.get('legend', LEGEND)
if LEGEND:
global LOC
LOC = options.get('loc', LOC)
pyplot.legend(loc=LOC)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(pyplot.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt)
if clf:
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functons with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = _Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| gpl-3.0 |
nelson-liu/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
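# Added note: the line above is the decision boundary w . x + b = 0 of the
# linear SVC; solving w[0]*x + w[1]*y + b = 0 for y gives
# y = -(w[0]/w[1]) * x - b/w[1], i.e. slope a = -w[0]/w[1] and intercept
# -b/w[1], which is exactly what plot_hyperplane draws.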
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
OpenDrift/opendrift | tests/models/test_readers.py | 1 | 29038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import unittest
from datetime import datetime, timedelta
import numpy as np
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.leeway import Leeway
from opendrift.models.openoil import OpenOil
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
from opendrift.readers import reader_global_landmask
from opendrift.readers import reader_constant
from opendrift.readers import reader_lazy
from opendrift.readers import reader_from_url
from opendrift.models.pelagicegg import PelagicEggDrift
from opendrift.readers import reader_current_from_track
o = OceanDrift(loglevel=20)
reader_list = [
'www.nonexistingurl.com',
o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc',
'/nonexistingdisk/nonexistingfile.ext',
o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc']
class TestReaders(unittest.TestCase):
"""Tests for readers"""
def test_adding_readers(self):
o = OceanDrift()
landmask = reader_global_landmask.Reader(
extent=[-1.5, 7, 59, 64])
r = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
o.add_reader([r, landmask])
self.assertEqual(o.priority_list['land_binary_mask'],
['roms native', 'global_landmask'])
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
# Switch order
o = OceanDrift()
o.add_reader([landmask, r])
self.assertEqual(o.priority_list['land_binary_mask'],
['global_landmask', 'roms native'])
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
# Test add_readers_from_list
o = OceanDrift()
o.add_readers_from_list(reader_list, lazy=False)
self.assertEqual(o.priority_list['x_sea_water_velocity'],
['roms native'])
self.assertEqual(o.priority_list['x_wind'],
[o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc'])
def test_repeated_run(self):
# NOTE: this test fails if outfile is not None
#outfile = 'leeway_test.nc'
outfile = None
o = OceanDrift(loglevel=50)
o.set_config('drift:vertical_mixing', False)
o.add_readers_from_list(reader_list)
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5, outfile=outfile)
lon1 = o.get_property('lon')[0]
# Repeated run with same object
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5, outfile=outfile)
lon2 = o.get_property('lon')[0]
# Third run, with different config
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12),
wind_drift_factor=.1)
o.run(steps=5)
lon3 = o.get_property('lon')[0]
# Fourth run, with different time
o.reset() # Reset is needed due to new start_time
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 13),
wind_drift_factor=.1)
o.run(steps=5, outfile=outfile)
lon4 = o.get_property('lon')[0]
# Check results
self.assertEqual(lon1[-1][0], lon2[-1][0])
self.assertNotEqual(lon3[-1][0], lon2[-1][0])
#os.remove(outfile)
def test_reader_from_url(self):
readers = reader_from_url(reader_list)
self.assertIsNone(readers[0])
self.assertTrue(isinstance(readers[1],
reader_ROMS_native.Reader))
self.assertIsNone(readers[2])
self.assertTrue(isinstance(readers[3],
reader_netCDF_CF_generic.Reader))
def test_lazy_reader(self):
o = OceanDrift(loglevel=20)
lr = reader_lazy.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertFalse(lr.initialised)
self.assertEqual(len(lr.covers_positions([15], [69])[0]), 1)
self.assertEqual(len(lr.covers_positions([0], [0])[0]), 0)
self.assertTrue(lr.initialised)
# Make a corresponding, unlazy reader
rr = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertEqual(len(rr.covers_positions([15], [69])[0]), 1)
self.assertEqual(len(rr.covers_positions([0], [0])[0]), 0)
# Check that both readers provide the same attributes
for att in rr.__dict__:
self.assertEqual(type(lr.__getattr__(att)),
type(getattr(rr, att)))
if type(getattr(rr, att)) in [float, int, dict, str, list,
datetime, timedelta, bool,
np.float64]:
self.assertEqual(lr.__getattr__(att),
getattr(rr, att))
elif type(getattr(rr, att)) in [np.ndarray]:
self.assertIsNone(np.testing.assert_array_equal(
lr.__getattr__(att),
getattr(rr, att)))
else:
print('Skipping: ' + att + ' ' +
str(type(getattr(rr, att))))
def test_lazy_reader_oildrift(self):
o = OpenOil(loglevel=0)
reader_constant_wind = \
reader_constant.Reader({'x_wind':5, 'y_wind': 6,
'sea_ice_area_fraction': 0})
# Added ice area to prevent problems with masking
# with older versions of netCDF library
o.add_reader(reader_constant_wind)
o.add_readers_from_list(reader_list, lazy=True)
self.assertEqual(len(o._lazy_readers()), 4)
o.seed_elements(lon=14, lat=67.85,
time=datetime(2016, 2, 2, 12))
o.run(steps=5)
print(o) # Debug, this fails for old libraries
self.assertEqual(len(o._lazy_readers()), 2)
self.assertEqual(len(o.discarded_readers), 1)
def test_ROMS_native_stranding(self):
o = OceanDrift(loglevel=0)
r = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
o.add_reader(r)
o.set_config('general:use_auto_landmask', False)
o.set_config('drift:vertical_mixing', False)
o.set_config('environment:fallback:x_wind', 0)
o.set_config('environment:fallback:y_wind', 10)
o.seed_elements(lon=15.2, lat=68.3, time=r.start_time,
wind_drift_factor=.02,
number=10, radius=1000)
o.run(steps=8)
self.assertEqual(o.num_elements_deactivated(), 2)
#def test_lazy_readers_and_corrupt_data(self):
# o = OceanDrift(loglevel=0)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc'])
# reader_constant_current_corrupt = \
# reader_constant.Reader({'x_sea_water_velocity': np.nan,
# 'y_sea_water_velocity': np.nan})
# o.add_reader(reader_constant_current_corrupt)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc'])
# print o
# o.seed_elements(lon=14.5, lat=68, time=datetime(2016,2,4))
# o.set_config('environment:fallback:'x_wind', 0)
# o.set_config('environment:fallback:'y_wind', 0)
# o.set_config('environment:fallback:'x_sea_water_velocity', None)
# o.set_config('environment:fallback:'y_sea_water_velocity', None)
# o.set_config('environment:fallback:'land_binary_mask', 0)
# print o
# o.run(steps=1)
#def test_oildrift_backwards(self):
# o = OpenOil(loglevel=20)
# reader_constant_wind = \
# reader_constant.Reader({'x_wind':5, 'y_wind': 6})
# o.add_reader(reader_constant_wind)
# o.add_readers_from_list(reader_list, lazy=True)
# self.assertEqual(len(o._lazy_readers()), 4)
# o.seed_elements(lon=14, lat=67.85,
# time=datetime(2016, 2, 2, 12))
# o.set_config()
# o.run(steps=5)
# self.assertEqual(len(o._lazy_readers()), 2)
# self.assertEqual(len(o.discarded_readers), 1)
#def test_lazy_reader_oildrift_real(self):
# o = OpenOil(loglevel=0)
# o.add_readers_from_file(o.test_data_folder() +
# '../../opendrift/scripts/data_sources.txt')
# o.seed_elements(lon=4, lat=60.0,
# time=datetime(2018, 7, 2, 12))
# o.run(steps=5)
# print o
def test_lazy_reader_leeway_compare(self):
o1 = Leeway(loglevel=0)
#o1.set_config('environment:fallback:land_binary_mask', 0)
o1.required_variables = [r for r in o1.required_variables
if r != 'land_binary_mask']
o1.add_readers_from_list(reader_list, lazy=False)
time = o1.readers['roms native'].start_time
o1.seed_elements(lat=67.85, lon=14, time=time)
o1.run(steps=5)
o2 = Leeway(loglevel=20)
#o2.set_config('environment:fallback:land_binary_mask', 0)
o2.required_variables = [r for r in o1.required_variables
if r != 'land_binary_mask']
o2.add_readers_from_list(reader_list, lazy=True)
o2.seed_elements(lat=67.85, lon=14, time=time)
o2.run(steps=5)
# Some differences in wind and current components
# due to different coordinate system
for var in o1.history.dtype.names:
if var in ['x_wind', 'y_wind', 'x_sea_water_velocity',
'y_sea_water_velocity']:
tolerance = 1
else:
tolerance = 5
self.assertIsNone(np.testing.assert_array_almost_equal(
o1.history[var], o2.history[var], tolerance))
def test_constant_and_lazy_reader_leeway(self):
cw = reader_constant.Reader({'x_wind':5, 'y_wind': 6})
cc = reader_constant.Reader({'x_sea_water_velocity':0,
'y_sea_water_velocity': .2})
o = Leeway(loglevel=20)
o.add_reader([cw, cc])
o.add_readers_from_list(reader_list)
o.set_config('environment:fallback:x_sea_water_velocity', 0.0)
o.set_config('environment:fallback:y_sea_water_velocity', 0.1)
time = datetime(2016,2,2,12)
o.seed_elements(lat=67.85, lon=14, time=time)
o.run(steps=2)
self.assertAlmostEqual(o.elements.lat[0], 67.8548, 3)
def test_automatic_landmask(self):
o = OceanDrift(loglevel=20)
self.assertRaises(ValueError, o.run)
o.seed_elements(lon=4, lat=60, time=datetime(2016,9,1))
o.run(steps=2)
def test_reader_coverage(self):
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
# Element outside reader domain
self.assertEqual(len(r.covers_positions(5, 80)[0]), 0)
x, y = r.lonlat2xy(5, 80)
self.assertRaises(ValueError, r.check_arguments,
'y_sea_water_velocity', r.start_time, x, y, 0)
# Element inside reader domain
self.assertEqual(len(r.covers_positions(5, 60)[0]), 1)
x, y = r.lonlat2xy(5, 60)
var, time, x2, y2, z2, outside = \
r.check_arguments('y_sea_water_velocity', r.start_time, x, y, 0)
self.assertEqual(var, ['y_sea_water_velocity'])
self.assertEqual(time, r.start_time)
self.assertEqual(x, x2)
self.assertEqual(y, y2)
self.assertEqual(0, z2)
self.assertEqual(len(outside), 0)
def test_outside_reader_time_coverage(self):
o = PelagicEggDrift()
reader = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
o.add_reader(reader)
o.set_config('environment:fallback:x_sea_water_velocity', 1)
o.set_config('environment:fallback:land_binary_mask', 0)
o.set_config('drift:vertical_mixing', False)
o.seed_elements(lon=4.8, lat=60, number=1, time=reader.end_time)
o.run(steps=2)
# Check that fallback value is used when outside time coverage
self.assertEqual(o.history['x_sea_water_velocity'][0][-1], 1.0)
def test_reader_netcdf(self):
"""Check reader functionality."""
reader1 = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
reader2 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
readers = [reader1, reader2]
for r in readers:
print(r)
# Make four points:
# 1) outside lower left, 2) lower left, 3) center of domain
# 4) outside upper right
# and assure that only 2) and 3) are marked as covered
# Upper right is skipped, as lonlat2xy may lie slightly outside
x = np.array([r.xmin - r.delta_x, r.xmin, (r.xmin + r.xmax)/2,
r.xmax + r.delta_x])
y = np.array([r.ymin - r.delta_y, r.ymin, (r.ymin + r.ymax)/2,
r.ymax + r.delta_y])
lons, lats = r.xy2lonlat(x, y)
covered = r.covers_positions(lons, lats, 0)[0]
if len(covered) != 1:
self.assertEqual(covered.tolist(), [1, 2])
else:
if covered == [2]:
print('#'*60)
print('#'*60)
print('WARNING: A point on the boundary is considered ' \
'outside after conversion x,y -> lon,lat -> x,y. ' \
'This is different from "standard", but is due to ' \
'rounding differences and not considered to be an ' \
'error. Numpy version is %s' % (np.__version__))
print('#'*60)
print('#'*60)
else:
self.assertTrue(False) # Should never happen!
self.assertTrue(r.covers_time(r.start_time))
self.assertFalse(r.covers_time(r.start_time - r.time_step))
self.assertFalse(r.proj.crs.is_geographic)
def test_vertical_profiles(self):
norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
lon = np.array([4.73])
lat = np.array([62.35])
variables = ['x_sea_water_velocity', 'x_sea_water_velocity',
'sea_water_temperature']
x,y = norkyst3d.lonlat2xy(lon, lat)
data = norkyst3d.get_variables(variables,
time=norkyst3d.start_time,
x=x, y=y, z=[0, -100])
self.assertEqual(data['z'][4], -25)
self.assertEqual(data['z'][4], -25)
self.assertAlmostEqual(data['sea_water_temperature'][:,0,0][7],
9.220000267028809)
def test_vertical_interpolation(self):
norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
lon = np.array([4.73, 4.75])
lat = np.array([62.35, 62.30])
z = np.array([0, -33])
variables = ['x_sea_water_velocity', 'x_sea_water_velocity',
'sea_water_temperature']
# Call get_variables_interpolated which interpolates both in
# space (horizontally, vertically) and then in time
data, profiles = norkyst3d.get_variables_interpolated(
variables, profiles=['sea_water_temperature'],
profiles_depth = [-100, 0],
time = norkyst3d.start_time + timedelta(seconds=900),
lon=lon, lat=lat, z=z)
# Check surface value
self.assertEqual(data['sea_water_temperature'][0],
profiles['sea_water_temperature'][0,0])
# Check interpolated temperature at 33 m depth
self.assertAlmostEqual(data['sea_water_temperature'][1],
8.32, 2)
#import matplotlib.pyplot as plt
#plt.plot(profiles['sea_water_temperature'][:,0])
#plt.plot(profiles['sea_water_temperature'][:,1], 'r')
#plt.show()
def test_vertical_interpolation_sigma(self):
nordic3d = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
lon = np.array([12.46, 12.46, 12.46])
lat = np.array([68.21, 69.31, 69.31])
z = np.array([-33, 0, -2500])
x, y = nordic3d.lonlat2xy(lon, lat)
variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature']
# Call get_variables_interpolated which interpolates both in
data = nordic3d.get_variables(variables,
time = nordic3d.start_time + timedelta(seconds=900),
x=x, y=y, z=z)
self.assertAlmostEqual(data['sea_water_temperature'][0,60, 60],
3.447, 2)
#3.59, 2)
self.assertAlmostEqual(data['sea_water_temperature'][-1,60, 60],
-0.783, 2)
#-0.803, 2)
def test_get_environment(self):
o = PelagicEggDrift(loglevel=0)
reader_nordic = reader_ROMS_native.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc', name='Nordic')
reader_arctic = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc', name='Arctic')
######################################################
# Vertical interpolation is another issue to be fixed:
reader_nordic.zlevels = reader_arctic.z
######################################################
o.add_reader([reader_nordic, reader_arctic])
# One point covered only by Nordic, two points coverd
# by both readers, and two points covered by none of the readers
testlon = np.array((14.0, 20.0, 20.1, 4, 5))
testlat = np.array((70.1, 76.0, 76.1, 60, 60))
testz = np.random.uniform(0, 0, len(testlon))
self.assertIsNone(np.testing.assert_array_almost_equal(
[0], reader_nordic.covers_positions(testlon, testlat, testz)[0]))
self.assertIsNone(np.testing.assert_array_almost_equal(
[0, 1, 2],
reader_arctic.covers_positions(testlon, testlat, testz)[0]))
o.seed_elements(testlon, testlat, z=testz, time=reader_nordic.start_time)
o.set_config('environment:fallback:land_binary_mask', 0)
env, env_profiles, missing = \
o.get_environment(list(o.required_variables),
reader_nordic.start_time,
testlon, testlat, testz,
o.required_profiles)
self.assertAlmostEqual(env['sea_water_temperature'][0], 4.251, 2)
self.assertAlmostEqual(env['sea_water_temperature'][1], 0.122, 3)
self.assertAlmostEqual(env['sea_water_temperature'][4], 10.0)
self.assertIsNone(np.testing.assert_array_almost_equal(
missing, [False,False,False,False,False]))
self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,0],
4.251, 2)
self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,4], 10)
#self.assertAlmostEqual(env_profiles['sea_water_temperature'][8,2], 10)
self.assertAlmostEqual(env_profiles['sea_water_temperature'][7,2],
2.159, 3)
# Get separate data
env2, env_profiles2, missing2 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
['sea_water_temperature'])
self.assertTrue(env_profiles2 is not None)
self.assertEqual(set(env_profiles2.keys()),
set(['z', 'sea_water_temperature']))
# Get separate data, without profile
env3, env_profiles3, missing3 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
profiles=None)
self.assertTrue(env_profiles3 is None)
# Get separate data
env4, env_profiles4, missing4 = \
o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
'sea_water_temperature'],
reader_nordic.start_time,
testlon, testlat, testz,
['sea_water_temperature'])
self.assertIsNone(np.testing.assert_array_almost_equal(
env['x_sea_water_velocity'],
env2['x_sea_water_velocity']))
self.assertIsNone(np.testing.assert_array_almost_equal(
env_profiles2['sea_water_temperature'].ravel(),
env_profiles4['sea_water_temperature'].ravel()))
def test_constant_reader(self):
o = OpenOil(loglevel=0)
cw = reader_constant.Reader({'x_wind':5, 'y_wind': 6})
cc = reader_constant.Reader({'x_sea_water_velocity':0, 'y_sea_water_velocity': .2})
cs = reader_constant.Reader({'sea_water_temperature': 278})
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
o.add_reader([cw, cc, r])
# TODO: should check why adding constant reader with
# sea_water_temperature gives Deprecated warning
#o.add_reader([cw, cc, cs, r])
o.seed_elements(lon=4, lat=60, time=r.start_time, number=5)
o.run(steps=3)
def test_clip_domain(self):
o = OceanDrift(loglevel=50)
r1 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
r1.clip_boundary_pixels(20)
r2 = reader_ROMS_native.Reader(o.test_data_folder() +
'2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
self.assertEqual(r2.shape, (151, 81))
self.assertEqual(r1.shape, (111, 41))
self.assertEqual(r1.xmin, 20)
o1 = OceanDrift(loglevel=50)
o1.set_config('environment:fallback:x_sea_water_velocity', None)
o1.add_reader(r1)
o1.seed_elements(lon=15, lat=70.1, time=r1.start_time)
o1.set_config('environment:fallback:land_binary_mask', 0)
o1.run(time_step=3600*3, duration=timedelta(hours=48))
o2 = OceanDrift(loglevel=50)
o2.set_config('environment:fallback:x_sea_water_velocity', None)
o2.add_reader(r2)
o2.seed_elements(lon=15, lat=70.1, time=r1.start_time)
o2.set_config('environment:fallback:land_binary_mask', 0)
o2.run(time_step=3600*3, duration=timedelta(hours=48))
# Compare
lat1 = o1.get_property('lat')[0]
lat2 = o2.get_property('lat')[0]
self.assertEqual(len(lat1), 13)
self.assertEqual(len(lat2), 17)
self.assertIsNone(np.testing.assert_allclose(
lat1[0:12], lat2[0:12]))
# Test reader netCDF_CF_generic
r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
self.assertEqual(r.shape, (301, 201))
o3 = OceanDrift(loglevel=50)
o3.set_config('environment:fallback:x_sea_water_velocity', None)
o3.set_config('environment:fallback:land_binary_mask', 0)
o3.add_reader(r)
o3.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
o3.run(steps=24)
r.clip_boundary_pixels(10)
self.assertEqual(r.shape, (281, 181))
o4 = OceanDrift(loglevel=50)
o4.set_config('environment:fallback:x_sea_water_velocity', None)
o4.set_config('environment:fallback:land_binary_mask', 0)
o4.add_reader(r)
o4.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
o4.run(steps=24)
# Compare
lat3 = o3.get_property('lat')[0]
lat4 = o4.get_property('lat')[0]
self.assertEqual(len(lat3), 25)
self.assertEqual(len(lat4), 13)
self.assertIsNone(np.testing.assert_allclose(
lat3[0:12], lat4[0:12]))
def test_reader_current_from_track(self):
"""Check if extrapolated currents are of expected value"""
obslon = [3.1, 3.123456]
obslat = [61.1, 61.132198]
obstime = [datetime(2015, 11, 16, 0), datetime(2015, 11, 16, 6)]
o = OceanDrift(loglevel=20)
reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
reader_current = reader_current_from_track.Reader(obslon, obslat, obstime,
wind_east=0, wind_north=0, windreader=reader_wind, wind_factor=0.018)
self.assertAlmostEqual(reader_current.x_sea_water_velocity.data[0],0.2236, 4)
def test_valid_minmax(self):
"""Check that invalid values are replaced with fallback."""
o = OceanDrift(loglevel=20)
from opendrift.readers.basereader import variables
minval = variables.standard_names['x_wind']['valid_min']
# Setting valid_min to -5, to check that replacement works
variables.standard_names['x_wind']['valid_min'] = -5
reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
o.add_reader(reader_wind)
o.set_config('environment:fallback:x_sea_water_velocity', 0)
o.set_config('environment:fallback:x_wind', 2.0)
o.set_config('environment:fallback:y_sea_water_velocity', 0)
o.set_config('environment:fallback:land_binary_mask', 0)
o.seed_elements(lon=4, lat=60, time=reader_wind.start_time)
o.run(steps=1)
variables.standard_names['x_wind']['valid_min'] = minval # reset
w = o.get_property('x_wind')[0][0]
self.assertAlmostEqual(w, 2.0, 1)
def test_valid_minmax_nanvalues(self):
from opendrift.readers.basereader import variables
# Reducing max current speed to test masking
maxval = variables.standard_names['x_sea_water_velocity']['valid_max']
variables.standard_names['x_sea_water_velocity']['valid_max'] = .1
o = OceanDrift(loglevel=20)
o.set_config('environment:fallback:land_binary_mask', 0)
norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
o.add_reader(norkyst)
o.seed_elements(lon=4.95, lat=62, number=10, time=norkyst.start_time)
o.run(steps=2)
variables.standard_names['x_sea_water_velocity']['valid_max'] = maxval # reset
u = o.get_property('x_sea_water_velocity')[0]
self.assertAlmostEqual(u.max(), -.069, 3) # Some numerical error allowed
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
frank-tancf/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1); that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
MattNolanLab/Ramsden_MEC | ABAFunctions/ABA_errors.py | 1 | 4010 | '''
Code for error analysis
Copyright (c) 2014, Helen Ramsden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Image, ImageChops
import numpy as np
from scipy import ndimage
from GenericFunctions import checkOSpath, adjust_spines,st
import matplotlib.pyplot as plt
plt.rc('ytick', labelsize=12)
plt.rc('xtick', labelsize=12)
plt.rc('axes', labelsize=12)
plt.rc('axes', titlesize=20)
def checksegmented(segmaskfilepath,filedict,resultsfilepath):
'''
FUNCTION runs through all segmented masks and checks location of centre of mass and size of mask
input SegmentedMask/
output is a list containing name of file, size of mask,
'''
newfile = open(resultsfilepath + 'maskstatssize.txt','w')
for f in filedict:
# print f
newfile.write(f )
for filepath in [segmaskfilepath]:#, segmask2filepath]:
maskim = Image.open(filepath+ f).convert('L') # need to convert to 8 bit (not rgb)
maskim = ImageChops.invert(maskim)
maskarray = np.array(maskim)
# print maskarray.shape
com = ndimage.measurements.center_of_mass(maskarray)
blackpixels = np.nonzero(maskarray==0)
whitepixels = np.nonzero(maskarray>0)
# print len(blackpixels[0]),len(whitepixels[0])
masksize = len(blackpixels[0])
newfile.write('\t' + '\t'.join([str(com[0]),str(com[1]),str(masksize)]))
newfile.write('\n')
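# Hedged usage sketch (added for illustration): the directory and file names
# below are placeholders, not paths from the original analysis.
def _example_checksegmented():
    """Collect mask statistics for a couple of segmented-mask files."""
    masks = ['series_001_mask.tif', 'series_002_mask.tif']
    checksegmented('SegmentedMask/', masks, 'Results/')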
def plotmi():
'''
Plot the distribution of MI scores from the registration output
'''
milog = np.loadtxt('alllogdata.txt',delimiter = '\t',dtype = float,usecols=[2,3])
diffs = milog[:,0] - milog[:,1]
milognew = np.ma.masked_array(milog, np.isnan(milog))
diffsnew = np.ma.masked_array(diffs, np.isnan(diffs))
# Get rid of nans
milogmaskpre = np.ma.masked_array(milog[:,0],np.isnan(milog[:,0]))
milogmaskpost = np.ma.masked_array(milog[:,1],np.isnan(milog[:,1]))
milogmaskpre = milogmaskpre[milogmaskpre>-1000]
milogmaskpost = milogmaskpost[milogmaskpost>-1000]
fig = plt.figure(figsize = (8,8))
fig.subplots_adjust(bottom=0.2)
fig.subplots_adjust(left=0.2)
ax = fig.add_subplot(1,1,1)
adjust_spines(ax, ['left','bottom'])
cols = ['r','b','k','g','m','y']
# histpre, binspre = np.histogram(milogmaskpre, bins=20)
# histpost, binspre = np.histogram(milogmaskpre, bins=20)
ax.hist(milogmaskpre, bins=20,histtype='step',color='b', range = [-600,0])
ax.hist(milogmaskpost,bins=20,histtype='step',color='g', range = [-600,0]) # normed=True,
[xmin, xmax, ymin, ymax] = ax.axis()
ax.set_yticks([ymin,ymax])
ax.set_yticklabels([int(ymin),int(ymax)], fontsize = 25)
ax.xaxis.set_label_coords(0.5, -0.15)
ax.set_xticks([xmin,xmax])
ax.set_xticklabels([xmin,xmax], fontsize = 25)
ax.set_xlabel('Joint Entropy', fontsize = 25)
ax.set_ylabel('Frequency', fontsize = 25)
ax.yaxis.set_label_coords( -0.15, 0.5)
fig.savefig('MIlogdata.png', transparent = True)
| bsd-3-clause |
ManuSchmi88/landlab | landlab/plot/imshow.py | 3 | 21050 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
"""Prepare a map view of data over all nodes in the grid.
Data is plotted as cells shaded with the value at the node at its center.
Outer edges of perimeter cells are extrapolated. Closed elements are
colored uniformly (default black, overridden with kwd 'color_for_closed');
other open boundary nodes get their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node values, or a field name as a string from which to draw the data.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for y and x dimensions. If None, the component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed nodes (default 'black'). If None, closed
(or masked) nodes will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
if isinstance(values, str):
values_at_node = grid.at_node[values]
else:
values_at_node = values
if values_at_node.size != grid.number_of_nodes:
raise ValueError('number of values does not match number of nodes')
values_at_node = np.ma.masked_where(
grid.status_at_node == CLOSED_BOUNDARY, values_at_node)
try:
shape = grid.shape
except AttributeError:
shape = (-1, )
_imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)
if isinstance(values, str):
plt.title(values)
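# Added usage sketch (not part of the original module): the grid size and
# field name are arbitrary illustrations of the API documented above.
def _example_imshow_at_node():
    """Shade a small raster grid by a node field and display it."""
    rmg = RasterModelGrid((4, 5))
    z = rmg.add_zeros('topographic__elevation', at='node')
    z += np.arange(rmg.number_of_nodes)
    imshow_grid_at_node(rmg, 'topographic__elevation', grid_units=('m', 'm'),
                        var_name='Elevation', var_units='m')
    plt.show()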
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
imshow_grid_at_node(grid, values, **kwds)
def imshow_grid_at_cell(grid, values, **kwds):
"""Map view of grid data over all grid cells.
Prepares a map view of data over all cells in the grid.
Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.
Construction ::
imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Values at the cells on the grid. Alternatively, can be a field name
(string) from which to draw the data from the grid.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for y and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
        (default False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
Raises
------
ValueError
If input grid is not uniform rectilinear.
"""
if isinstance(values, str):
try:
values_at_cell = grid.at_cell[values]
except FieldError:
values_at_cell = grid.at_node[values]
else:
values_at_cell = values
if values_at_cell.size == grid.number_of_nodes:
values_at_cell = values_at_cell[grid.node_at_cell]
if values_at_cell.size != grid.number_of_cells:
raise ValueError('number of values must match number of cells or '
'number of nodes')
values_at_cell = np.ma.asarray(values_at_cell)
values_at_cell.mask = True
values_at_cell.mask[grid.core_cells] = False
myimage = _imshow_grid_values(grid,
values_at_cell.reshape(grid.cell_grid_shape),
**kwds)
if isinstance(values, str):
plt.title(values)
return myimage
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
imshow_grid_at_cell(grid, values, **kwds)
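# A hedged usage sketch added for illustration; it is not part of the original
# module. The imshow_grid_at_cell docstring above says values may be supplied
# per node and are then trimmed to the core cells, which is what this shows.
# The RasterModelGrid constructor call is an assumption.
def _example_imshow_cell_values():
    grid = RasterModelGrid((4, 5))
    node_values = np.arange(grid.number_of_nodes, dtype=float)
    imshow_grid_at_cell(grid, node_values, var_name='elevation',
                        var_units='m')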
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=(None, None),
symmetric_cbar=False, cmap='pink', limits=None,
colorbar_label = None,
allow_colorbar=True, vmin=None, vmax=None,
norm=None, shrink=1., color_for_closed='black',
color_for_background=None, show_elements=False,
output=None):
gridtypes = inspect.getmro(grid.__class__)
cmap = plt.get_cmap(cmap)
if color_for_closed is not None:
cmap.set_bad(color=color_for_closed)
else:
cmap.set_bad(alpha=0.)
if isinstance(grid, RasterModelGrid):
if values.ndim != 2:
raise ValueError('values must have ndim == 2')
y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5
kwds = dict(cmap=cmap)
(kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
if (limits is None) and ((vmin is None) and (vmax is None)):
if symmetric_cbar:
(var_min, var_max) = (values.min(), values.max())
limit = max(abs(var_min), abs(var_max))
(kwds['vmin'], kwds['vmax']) = (- limit, limit)
elif limits is not None:
(kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
else:
if vmin is not None:
kwds['vmin'] = vmin
if vmax is not None:
kwds['vmax'] = vmax
if np.isclose(grid.dx, grid.dy):
if values.size == grid.number_of_nodes:
myimage = plt.imshow(
values.reshape(grid.shape), origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
else: # this is a cell grid, and has been reshaped already...
myimage = plt.imshow(values, origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
myimage = plt.pcolormesh(x, y, values, **kwds)
plt.gca().set_aspect(1.)
plt.autoscale(tight=True)
if allow_colorbar:
cb = plt.colorbar(norm=norm, shrink=shrink)
if colorbar_label:
cb.set_label(colorbar_label)
elif VoronoiDelaunayGrid in gridtypes:
# This is still very much ad-hoc, and needs prettifying.
# We should save the modifications needed to plot color all the way
# to the diagram edge *into* the grid, for faster plotting.
# (see http://stackoverflow.com/questions/20515554/...
# colorize-voronoi-diagram)
# (This technique is not implemented yet)
from scipy.spatial import voronoi_plot_2d
import matplotlib.colors as colors
import matplotlib.cm as cmx
cm = plt.get_cmap(cmap)
if (limits is None) and ((vmin is None) and (vmax is None)):
# only want to work with NOT CLOSED nodes
open_nodes = grid.status_at_node != 4
if symmetric_cbar:
(var_min, var_max) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
limit = max(abs(var_min), abs(var_max))
(vmin, vmax) = (- limit, limit)
else:
(vmin, vmax) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
elif limits is not None:
(vmin, vmax) = (limits[0], limits[1])
else:
open_nodes = grid.status_at_node != 4
if vmin is None:
vmin = values.flat[open_nodes].min()
if vmax is None:
vmax = values.flat[open_nodes].max()
cNorm = colors.Normalize(vmin, vmax)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
colorVal = scalarMap.to_rgba(values)
if show_elements:
myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
show_points=False)
# show_points to be supported in scipy0.18, but harmless for now
mycolors = (i for i in colorVal)
for order in grid.vor.point_region:
region = grid.vor.regions[order]
colortouse = next(mycolors)
if -1 not in region:
polygon = [grid.vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=colortouse)
plt.gca().set_aspect(1.)
# plt.autoscale(tight=True)
# Tempting though it is to move the boundary outboard of the outermost
# nodes (e.g., to the outermost corners), this is a bad idea, as the
# outermost cells tend to have highly elongated shapes which make the
# plot look stupid
plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))
scalarMap.set_array(values)
if allow_colorbar:
cb = plt.colorbar(scalarMap, shrink=shrink)
if grid_units[1] is None and grid_units[0] is None:
grid_units = grid.axis_units
if grid_units[1] == '-' and grid_units[0] == '-':
plt.xlabel('X')
plt.ylabel('Y')
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
if plot_name is not None:
plt.title('%s' % (plot_name))
if var_name is not None or var_units is not None:
if var_name is not None:
assert type(var_name) is str
if var_units is not None:
assert type(var_units) is str
colorbar_label = var_name + ' (' + var_units + ')'
else:
colorbar_label = var_name
else:
assert type(var_units) is str
colorbar_label = '(' + var_units + ')'
assert type(colorbar_label) is str
assert allow_colorbar
cb.set_label(colorbar_label)
if color_for_background is not None:
plt.gca().set_axis_bgcolor(color_for_background)
if output is not None:
if type(output) is str:
plt.savefig(output)
plt.clf()
elif output:
plt.show()
def imshow_grid(grid, values, **kwds):
"""Prepare a map view of data over all nodes or cells in the grid.
Data is plotted as colored cells. If at='node', the surrounding cell is
shaded with the value at the node at its center. If at='cell', the cell
is shaded with its own value. Outer edges of perimeter cells are
extrapolated. Closed elements are colored uniformly (default black,
overridden with kwd 'color_for_closed'); other open boundary nodes get
their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
    Landlab CLOSED_BOUNDARYs. Used together with the color_for_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node or cell values, or a field name as a string from which to draw
the data.
at : str, {'node', 'cell'}
Tells plotter where values are defined.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for y and x dimensions. If None, component will look to the
        grid property `axis_units` for this information. If no units are
specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
        (default False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
show = kwds.pop('show', False)
values_at = kwds.pop('values_at', 'node')
values_at = kwds.pop('at', values_at)
if isinstance(values, str):
values = grid.field_values(values_at, values)
if values_at == 'node':
imshow_grid_at_node(grid, values, **kwds)
elif values_at == 'cell':
imshow_grid_at_cell(grid, values, **kwds)
else:
raise TypeError('value location %s not understood' % values_at)
# retained for backwards compatibility:
if show:
plt.show()
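# A hedged usage sketch appended for illustration; it is not part of the
# original module. It follows the imshow_grid docstring: colour the nodes of a
# raster grid by an array of values. The RasterModelGrid constructor call and
# the variable/unit names are assumptions, not taken from this file.
def _example_imshow_node_values():
    grid = RasterModelGrid((4, 5))
    values = np.arange(grid.number_of_nodes, dtype=float)
    imshow_grid(grid, values, at='node', var_name='elevation',
                var_units='m', grid_units=('m', 'm'))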
| mit |
NelisVerhoef/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
jbloom/mutpath | src/plot.py | 1 | 10257 | """Module for performing plotting for ``mutpath`` package.
This module uses ``pylab`` and ``matplotlib`` to make plots. These plots will
fail if ``pylab`` and ``matplotlib`` are not available for importation. Before
running any function in this module, you can run the *PylabAvailable*
function to determine if ``pylab`` and ``matplotlib`` are available. Otherwise,
calling any other function will raise an Exception if these modules are
not available. The ``pdf`` backend is used for ``matplotlib`` / ``pylab``. This means
that plots must be created as PDF files.
Functions are:
`PylabAvailable`
`CumulativeFractionPlot`
`DatesPlot`
`Base10Formatter`
`SplitLabel`
Written by Jesse Bloom.
"""
import os
import sys
import math
# global variable _pylabavailable indicates if pylab/matplotlib present
try:
import matplotlib
matplotlib.use('pdf')
import pylab
_pylabavailable = True
except ImportError:
_pylabavailable = False
def PylabAvailable():
"""Returns True if pylab/matplotlib available, False otherwise.
You should call this function to test for the availability of the
pylab/matplotlib plotting modules before using other functions in
this module.
"""
return _pylabavailable
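# A hedged usage sketch, added for illustration only (not part of the original
# module). It shows the guard pattern the module docstring asks for: test
# PylabAvailable() before calling any plotting helper. The mutation tuple and
# the output name 'dates.pdf' are made-up values.
def _example_guarded_dates_plot():
    if not PylabAvailable():
        return
    mutdates = [(2005.1, 2004.8, 2005.5, 'A131T', 0.9, 0.8)]
    DatesPlot(mutdates, 'dates.pdf', interval=0.9)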
def DatesPlot(mutdates, plotfile, interval):
"""Plots dates of mutations.
Uses pylab / matplotlib to plot the dates and credible intervals
    for mutations. Will raise an error if *PylabAvailable() == False*.
The plot is a PDF.
* *mutdates* is a list of the mutations, in the form of the tuples
*(median, mininterval, maxinterval, mut, fractoca, weight)*. Mutations
are plotted in the order they are listed. In these tuples:
* *median* : posterior median date
      * *mininterval* : minimum of credible interval
* *maxinterval* : maximum of credible interval
* *mut* : string giving name of mutation
* *fractoca* : probability mutation is on path from common ancestor
to starting sequence
* *weight* : fraction of paths containing mutation.
* *plotfile* is a string giving the name of the PDF file we create.
* *interval* is the range of the credible interval. For example, 0.9
means a 90% credible interval.
"""
ext = os.path.splitext(plotfile)[1].lower()
if ext != '.pdf':
raise ValueError("Extension must be .pdf, but found %s" % ext)
if not PylabAvailable():
raise ValueError("pylab / matplotlib not available.")
if not mutdates:
raise ValueError("no mutation dates to plot")
tocalabels = []
tocamedians = []
tocaerrlow = []
tocaerrhigh = []
tocays = []
fromcalabels = []
fromcamedians = []
fromcaerrlow = []
fromcaerrhigh = []
fromcays = []
y = 0
for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
label = "%s" % (mut)
errlow = median - mininterval
errhigh = maxinterval - median
if fractoca > 0.5:
tocays.append(y)
tocalabels.append(label)
tocamedians.append(median)
tocaerrlow.append(errlow)
tocaerrhigh.append(errhigh)
else:
fromcays.append(y)
fromcalabels.append(label)
fromcamedians.append(median)
fromcaerrlow.append(errlow)
fromcaerrhigh.append(errhigh)
y += 1
(lmargin, rmargin, bmargin, tmargin) = (0.11, 0.05, 0.08, 0.01)
matplotlib.rc('font', size=10)
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('legend', numpoints=1)
matplotlib.rc('legend', fontsize=10)
fig = pylab.figure(figsize=(6, 6))
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - tmargin - bmargin])
tocabar = fromcabar = None
if tocalabels:
tocabar = pylab.errorbar(tocamedians, tocays, xerr=[tocaerrlow, tocaerrhigh], fmt='sr')
if fromcalabels:
fromcabar = pylab.errorbar(fromcamedians, fromcays, xerr=[fromcaerrlow, fromcaerrhigh], fmt='sb')
ny = len(mutdates)
pylab.gca().set_ylim((-1, ny))
pylab.gca().yaxis.set_major_locator(matplotlib.ticker.FixedLocator([y for y in range(ny)]))
pylab.gca().yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(tocalabels + fromcalabels))
pylab.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
pylab.xlabel("Date (posterior median and Bayesian %.2f%s credible interval)" % (interval * 100, '%'))
if tocabar and fromcabar:
pylab.legend([tocabar[0], fromcabar[0]], ['path to common ancestor', 'path from common ancestor'], loc='lower right')
elif tocabar:
pylab.legend([tocabar[0]], ['path to common ancestor'], loc='lower right')
elif fromcabar:
pylab.legend([fromcabar[0]], ['path from common ancestor'], loc='lower right')
pylab.savefig(plotfile)
def CumulativeFractionPlot(datalist, plotfile, title, xlabel):
"""Creates a cumulative fraction plot.
Takes a list of numeric data. Plots a cumulative fraction
plot giving the fraction of the data points that are <=
the indicated value.
*datalist* is a list of numbers giving the data for which we
are computing the cumulative fraction plot. Raises an
exception if this is an empty list.
*plotfile* is the name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be '.pdf'.
*title* is a string placed above the plot as a title. Uses LaTex
formatting.
*xlabel* is the label given to the X-axis. Uses LaTex formatting.
This function uses pylab / matplotlib. It will raise an Exception if
these modules cannot be imported (if PylabAvailable() is False).
"""
if len(datalist) < 1:
raise ValueError("datalist is empty")
if not _pylabavailable:
raise ImportError("Could not find pylab or matplotlib")
if os.path.splitext(plotfile)[1] != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
datalist.sort() # sort from smallest to largest
(xmin, xmax) = (datalist[0], datalist[-1])
n = len(datalist)
cumfracs = []
cf = 0.0
for x in datalist:
cf += 1. / n
cumfracs.append(cf)
assert len(datalist) == len(cumfracs)
assert abs(1.0 - cf) < 1e-7
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=12)
fig = pylab.figure(figsize=(6, 4))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.01, 0.15, 0.1)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
bmargin - tmargin])
pylab.plot(datalist, cumfracs, 'r-')
pylab.gca().set_ylim([0, 1])
pylab.gca().set_xlim([xmin, xmax])
pylab.ylabel('cumulative fraction')
pylab.xlabel(xlabel)
pylab.title(title)
if plotfile:
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
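# Another hedged usage sketch for illustration only: the docstring above
# describes plotting the cumulative fraction of a list of numbers, and this is
# the minimal corresponding call. The sample values and the output name
# 'cumfrac.pdf' are assumptions.
def _example_cumulative_fraction_plot():
    if not PylabAvailable():
        return
    CumulativeFractionPlot([0.1, 0.4, 0.4, 0.9, 2.5], 'cumfrac.pdf',
                           title='toy data', xlabel='value')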
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
"""Converts a number into Latex formatting with scientific notation.
Takes a number and converts it to a string that can be shown
in LaTex using math mode. It is converted to scientific notation
    if the criterion specified by *exp_cutoff* is met.
*number* the number to be formatted, should be a float or integer.
Currently only works for numbers >= 0
*exp_cutoff* convert to scientific notation if abs(math.log10(number)) >= this.
*exp_decimal_digits* show this many digits after the decimal if number
is converted to scientific notation.
*decimal_digits* show this many digits after the decimal if number
is NOT converted to scientific notation.
    The returned value is the LaTex string. If the number is zero, the
returned string is simply '0'.
>>> Base10Formatter(103, 3, 1, 1)
'103.0'
>>> Base10Formatter(103.0, 2, 1, 1)
'1.0 \\\\times 10^{2}'
>>> Base10Formatter(103.0, 2, 2, 1)
'1.03 \\\\times 10^{2}'
>>> Base10Formatter(2892.3, 3, 1, 1)
'2.9 \\\\times 10^{3}'
>>> Base10Formatter(0.0, 3, 1, 1)
'0'
>>> Base10Formatter(0.012, 2, 1, 1)
'1.2 \\\\times 10^{-2}'
>>> Base10Formatter(-0.1, 3, 1, 1)
Traceback (most recent call last):
...
ValueError: number must be >= 0
"""
if number < 0:
raise ValueError('number must be >= 0')
if number == 0:
return '0'
exponent = int(math.log10(number))
if math.log10(number) < exponent and number < 1:
exponent -= 1
if abs(exponent) >= exp_cutoff:
x = number / (10.**exponent)
formatstr = '%.' + '%d' % exp_decimal_digits + 'f \\times 10^{%d}'
return formatstr % (x, exponent)
else:
formatstr = '%.' + '%d' % decimal_digits + 'f'
return formatstr % number
def SplitLabel(label, splitlen, splitchar):
"""Splits a string with a return if it exceeds a certain length.
*label* a string giving the label we might split.
*splitlen* the maximum length of a label before we attempt to
split it.
*splitchar* the character added when splitting a label.
If len(*label*) > *splitlen*, we attempt to split the label in the
middle by adding *splitchar*. The label is split as close to the
middle as possible while splitting at a space.
    No splitting, as label length is less than *splitlen*
>>> SplitLabel('WT virus 1', 10, '\\n')
'WT virus 1'
Splitting of this label
>>> SplitLabel('WT plasmid 1', 10, '\\n')
'WT\\nplasmid 1'
Splitting of this label
>>> SplitLabel('mutated WT plasmid 1', 10, '\\n')
'mutated WT\\nplasmid 1'
"""
if len(label) <= splitlen:
return label
else:
j = 0
imid = len(label) // 2
index = None
while 0 <= imid - j <= imid + j < len(label):
if label[imid - j].isspace():
return "%s%s%s" % (label[ : imid - j], splitchar, label[imid - j + 1 : ])
elif label[imid + j].isspace():
return "%s%s%s" % (label[ : imid + j], splitchar, label[imid + j + 1 : ])
j += 1
else:
return label # no white space to split
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
xavierwu/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
FernanOrtega/DAT210x | Module3/notes/2Dscatter_example.py | 1 | 1245 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 21:14:57 2017
@author: fernando
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
df = pd.read_csv('concrete.csv')
print df.describe()
# Plot 1
df.plot.scatter(x='cement', y='strength')
plt.suptitle('Cement vs str')
plt.xlabel('Cement')
plt.ylabel('Str')
# Plot 2
df.plot.scatter(x='slag', y='strength')
plt.suptitle('slag vs str')
plt.xlabel('slag')
plt.ylabel('Str')
# Plot 3
df.plot.scatter(x='ash', y='strength')
plt.suptitle('ash vs str')
plt.xlabel('ash')
plt.ylabel('Str')
# Plot 4
df.plot.scatter(x='water', y='strength')
plt.suptitle('water vs str')
plt.xlabel('water')
plt.ylabel('Str')
# Plot 5
df.plot.scatter(x='superplastic', y='strength')
plt.suptitle('superplastic vs str')
plt.xlabel('superplastic')
plt.ylabel('Str')
# Plot 6
df.plot.scatter(x='coarseagg', y='strength')
plt.suptitle('coarseagg vs str')
plt.xlabel('coarseagg')
plt.ylabel('Str')
# Plot 7
df.plot.scatter(x='fineagg', y='strength')
plt.suptitle('fineagg vs str')
plt.xlabel('fineagg')
plt.ylabel('Str')
# Plot 8
df.plot.scatter(x='age', y='strength')
plt.suptitle('age vs str')
plt.xlabel('age')
plt.ylabel('Str')
plt.show() | mit |
Nelca/buildMLSystem | ch04/blei_lda.py | 3 | 1602 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from gensim import corpora, models, similarities
from mpltools import style
import matplotlib.pyplot as plt
import numpy as np
from os import path
style.use('ggplot')
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=corpus.id2word, alpha=None)
for ti in xrange(84):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
print('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
print()
print()
print()
thetas = [model[c] for c in corpus]
plt.hist([len(t) for t in thetas], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.savefig('../1400OS_04_01+.png')
model1 = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=corpus.id2word, alpha=1.)
thetas1 = [model1[c] for c in corpus]
#model8 = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word, alpha=1.e-8)
#thetas8 = [model8[c] for c in corpus]
plt.clf()
plt.hist([[len(t) for t in thetas], [len(t) for t in thetas1]], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.text(9, 223, r'default alpha')
plt.text(26, 156, 'alpha=1.0')
plt.savefig('../1400OS_04_02+.png')
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/widgets/menu.py | 3 | 4882 | import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.mathtext as mathtext
import matplotlib.pyplot as plt
import matplotlib.artist as artist
import matplotlib.image as image
class ItemProperties:
def __init__(self, fontsize=14, labelcolor='black', bgcolor='yellow',
alpha=1.0):
self.fontsize = fontsize
self.labelcolor = labelcolor
self.bgcolor = bgcolor
self.alpha = alpha
self.labelcolor_rgb = colors.colorConverter.to_rgb(labelcolor)
self.bgcolor_rgb = colors.colorConverter.to_rgb(bgcolor)
class MenuItem(artist.Artist):
parser = mathtext.MathTextParser("Bitmap")
padx = 5
pady = 5
def __init__(self, fig, labelstr, props=None, hoverprops=None,
on_select=None):
artist.Artist.__init__(self)
self.set_figure(fig)
self.labelstr = labelstr
if props is None:
props = ItemProperties()
if hoverprops is None:
hoverprops = ItemProperties()
self.props = props
self.hoverprops = hoverprops
self.on_select = on_select
x, self.depth = self.parser.to_mask(
labelstr, fontsize=props.fontsize, dpi=fig.dpi)
if props.fontsize!=hoverprops.fontsize:
raise NotImplementedError(
'support for different font sizes not implemented')
self.labelwidth = x.shape[1]
self.labelheight = x.shape[0]
self.labelArray = np.zeros((x.shape[0], x.shape[1], 4))
self.labelArray[:, :, -1] = x/255.
self.label = image.FigureImage(fig, origin='upper')
self.label.set_array(self.labelArray)
# we'll update these later
self.rect = patches.Rectangle((0,0), 1,1)
self.set_hover_props(False)
fig.canvas.mpl_connect('button_release_event', self.check_select)
def check_select(self, event):
over, junk = self.rect.contains(event)
if not over:
return
if self.on_select is not None:
self.on_select(self)
def set_extent(self, x, y, w, h):
print x, y, w, h
self.rect.set_x(x)
self.rect.set_y(y)
self.rect.set_width(w)
self.rect.set_height(h)
self.label.ox = x+self.padx
self.label.oy = y-self.depth+self.pady/2.
self.rect._update_patch_transform()
self.hover = False
def draw(self, renderer):
self.rect.draw(renderer)
self.label.draw(renderer)
def set_hover_props(self, b):
if b:
props = self.hoverprops
else:
props = self.props
r, g, b = props.labelcolor_rgb
self.labelArray[:, :, 0] = r
self.labelArray[:, :, 1] = g
self.labelArray[:, :, 2] = b
self.label.set_array(self.labelArray)
self.rect.set(facecolor=props.bgcolor, alpha=props.alpha)
def set_hover(self, event):
'check the hover status of event and return true if status is changed'
b,junk = self.rect.contains(event)
changed = (b != self.hover)
if changed:
self.set_hover_props(b)
self.hover = b
return changed
class Menu:
def __init__(self, fig, menuitems):
self.figure = fig
fig.suppressComposite = True
self.menuitems = menuitems
self.numitems = len(menuitems)
maxw = max([item.labelwidth for item in menuitems])
maxh = max([item.labelheight for item in menuitems])
totalh = self.numitems*maxh + (self.numitems+1)*2*MenuItem.pady
x0 = 100
y0 = 400
width = maxw + 2*MenuItem.padx
height = maxh+MenuItem.pady
for item in menuitems:
left = x0
bottom = y0-maxh-MenuItem.pady
item.set_extent(left, bottom, width, height)
fig.artists.append(item)
y0 -= maxh + MenuItem.pady
fig.canvas.mpl_connect('motion_notify_event', self.on_move)
def on_move(self, event):
draw = False
for item in self.menuitems:
draw = item.set_hover(event)
if draw:
self.figure.canvas.draw()
break
fig = plt.figure()
fig.subplots_adjust(left=0.3)
props = ItemProperties(labelcolor='black', bgcolor='yellow',
fontsize=15, alpha=0.2)
hoverprops = ItemProperties(labelcolor='white', bgcolor='blue',
fontsize=15, alpha=0.2)
menuitems = []
for label in ('open', 'close', 'save', 'save as', 'quit'):
def on_select(item):
print 'you selected', item.labelstr
item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
on_select=on_select)
menuitems.append(item)
menu = Menu(fig, menuitems)
plt.show()
| gpl-2.0 |
musically-ut/statsmodels | statsmodels/tsa/vector_ar/dynamic.py | 27 | 9932 | # pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
TODO
Returns
-------
**Attributes**:
coefs : WidePanel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as WidePanel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.WidePanel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
return pd.DataFrame(data)
def forecast(self, steps=1):
"""
Produce dynamic forecast
Parameters
----------
steps
Returns
-------
forecasts : pandas.DataFrame
"""
output = np.empty((self.T - steps, self.neqs))
y_values = self.y.values
y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))
result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))
coefs = self._coefs_raw
intercepts = self._intercepts_raw
# can only produce this many forecasts
forc_index = self.result_index[steps:]
for i, date in enumerate(forc_index):
# TODO: check that this does the right thing in weird cases...
idx = y_index_map[date] - steps
result_idx = result_index_map[date] - steps
y_slice = y_values[:idx]
forcs = _model.forecast(y_slice, coefs[result_idx],
intercepts[result_idx], steps)
output[i] = forcs[-1]
return pd.DataFrame(output, index=forc_index, columns=self.names)
def plot_forecast(self, steps=1, figsize=(10, 10)):
"""
Plot h-step ahead forecasts against actual realizations of time
series. Note that forecasts are lined up with their respective
realizations.
Parameters
----------
steps :
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,
sharex=True)
forc = self.forecast(steps=steps)
dates = forc.index
y_overlay = self.y.reindex(dates)
for i, col in enumerate(forc.columns):
ax = axes[i]
y_ts = y_overlay[col]
forc_ts = forc[col]
y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)
forc_handle = ax.plot(dates, forc_ts.values, 'k-')
fig.legend((y_handle, forc_handle), ('Y', 'Forecast'))
fig.autofmt_xdate()
fig.suptitle('Dynamic %d-step forecast' % steps)
# pretty things up a bit
plotting.adjust_subplots(bottom=0.15, left=0.10)
plt.draw_if_interactive()
@property
def _is_rolling(self):
return self._window_type == ROLLING
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
data = dict((eq, r.r2) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
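# A hedged usage sketch placed between the two classes for illustration; it is
# not part of the original module. It mirrors the DynamicVAR docstring: fit an
# expanding-window VAR(2) to a small DataFrame and read off the time-varying
# coefficients plus a 1-step dynamic forecast. The random toy data is an
# assumption.
def _example_dynamic_var():
    idx = pd.date_range('2000-01-01', periods=200, freq='D')
    data = pd.DataFrame(np.random.randn(200, 3).cumsum(0),
                        index=idx, columns=['a', 'b', 'c'])
    dvar = DynamicVAR(data, lag_order=2, window_type='expanding')
    return dvar.coefs, dvar.forecast(steps=1)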
class DynamicPanelVAR(DynamicVAR):
"""
Dynamic (time-varying) panel vector autoregression using panel ordinary
least squares
Parameters
----------
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.neqs = len(data.columns)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _filter_data(lhs, rhs):
"""
Data filtering routine for dynamic VAR
lhs : DataFrame
original data
rhs : DataFrame
lagged variables
Returns
-------
"""
def _has_all_columns(df):
return np.isfinite(df.values).sum(1) == len(df.columns)
rhs_valid = _has_all_columns(rhs)
if not rhs_valid.all():
pre_filtered_rhs = rhs[rhs_valid]
else:
pre_filtered_rhs = rhs
index = lhs.index.union(rhs.index)
if not index.equals(rhs.index) or not index.equals(lhs.index):
rhs = rhs.reindex(index)
lhs = lhs.reindex(index)
rhs_valid = _has_all_columns(rhs)
lhs_valid = _has_all_columns(lhs)
valid = rhs_valid & lhs_valid
if not valid.all():
filt_index = rhs.index[valid]
filtered_rhs = rhs.reindex(filt_index)
filtered_lhs = lhs.reindex(filt_index)
else:
filtered_rhs, filtered_lhs = rhs, lhs
return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
def _make_lag_matrix(x, lags):
data = {}
columns = []
for i in range(1, 1 + lags):
lagstr = 'L%d.'% i
lag = x.shift(i).rename(columns=lambda c: lagstr + c)
data.update(lag._series)
columns.extend(lag.columns)
return pd.DataFrame(data, columns=columns)
class Equation(object):
"""
Stub, estimate one equation
"""
def __init__(self, y, x):
pass
if __name__ == '__main__':
import pandas.util.testing as ptest
ptest.N = 500
data = ptest.makeTimeDataFrame().cumsum(0)
var = DynamicVAR(data, lag_order=2, window_type='expanding')
var2 = DynamicVAR(data, lag_order=2, window=10,
window_type='rolling')
| bsd-3-clause |
jblackburne/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
extended AR polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
if n == self.fftarma:
fftarma = self.fftarma
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
this is also similarly use in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
rvs = self.generate_sample(nsample=100, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
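# A hedged usage sketch (illustration only, not from the original file): it
# follows the ArmaFft class docstring by building one process and comparing
# two of its spectral-density implementations on comparable frequency grids.
# The ARMA coefficients and grid sizes are arbitrary assumptions.
def _example_armafft_spectra():
    arma = ArmaFft([1, -0.5], [1, 0.4], 200)
    spd_direct, w_direct = arma.spddirect(256)
    spd_roots, w_roots = arma.spdroots(np.linspace(0, np.pi, 128))
    return (w_direct, spd_direct), (w_roots, spd_roots)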
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print(marep[:10])
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))
arrep = signal.lfilter([1],marep, uni)
print(arrep[:20]) # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print(x[:5])
print(rvs[:5])
print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print(y2[:10])
print(y[:10])
print(maxabs(y, y2)) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print(spd1.shape)
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print(spd2.shape)
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print(spd3.shape)
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print(spd3b.shape)
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print(spdr.shape)
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print(spdar1_.shape)
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print(spdper.shape)
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
sdm, wm = mlb.psd(x)
print('sdm.shape', sdm.shape)
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print('spdnt.shape', spdnt.shape)
_ = plt.plot(spdnt.ravel())
print(spdnt[:10])
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
mmaelicke/scikit-gstat | skgstat/plotting/stvariogram_plot3d.py | 1 | 3989 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
import plotly.graph_objects as go
except ImportError:
pass
def __calculate_plot_data(stvariogram, **kwargs):
xx, yy = stvariogram.meshbins
z = stvariogram.experimental
# x = xx.flatten()
# y = yy.flatten()
# apply the model
nx = kwargs.get('x_resolution', 100)
nt = kwargs.get('t_resolution', 100)
# model spacing
_xx, _yy = np.mgrid[
0:np.nanmax(stvariogram.xbins):nx * 1j,
0:np.nanmax(stvariogram.tbins):nt * 1j
]
model = stvariogram.fitted_model
lags = np.vstack((_xx.flatten(), _yy.flatten())).T
# apply the model
_z = model(lags)
return xx.T, yy.T, z, _xx, _yy, _z
def matplotlib_plot_3d(stvariogram, kind='scatter', ax=None, elev=30, azim=220, **kwargs):
# get the data, spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
x = xx.flatten()
y = yy.flatten()
# some settings
c = kwargs.get('color', kwargs.get('c', 'b'))
cmap = kwargs.get('model_color', kwargs.get('cmap', 'terrain'))
alpha = kwargs.get('alpha', 0.8)
depthshade = kwargs.get('depthshade', False)
# handle the axes
if ax is not None:
if not isinstance(ax, Axes3D):
            raise ValueError('The passed ax object is not an instance of mpl_toolkits.mplot3d.Axes3D.')
fig = ax.get_figure()
else:
fig = plt.figure(figsize=kwargs.get('figsize', (10, 10)))
ax = fig.add_subplot(111, projection='3d')
# do the plot
ax.view_init(elev=elev, azim=azim)
if kind == 'surf':
ax.plot_trisurf(x, y, z, color=c, alpha=alpha)
elif kind == 'scatter':
ax.scatter(x, y, z, c=c, depthshade=depthshade)
else:
raise ValueError('%s is not a valid 3D plot' % kind)
# add the model
if not kwargs.get('no_model', False):
ax.plot_trisurf(_xx.flatten(), _yy.flatten(), _z, cmap=cmap, alpha=alpha)
# labels:
ax.set_xlabel('space')
ax.set_ylabel('time')
ax.set_zlabel('semivariance [%s]' % stvariogram.estimator.__name__)
# return
return fig
def plotly_plot_3d(stvariogram, kind='scatter', fig=None, **kwargs):
# get the data spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
# get some settings
c = kwargs.get('color', kwargs.get('c', 'black'))
cmap = kwargs.get('model_color', kwargs.get('colorscale', kwargs.get('cmap', 'Electric')))
alpha = kwargs.get('opacity', kwargs.get('alpha', 0.6))
    # handle the figure
if fig is None:
fig = go.Figure()
# do the plot
if kind == 'surf':
fig.add_trace(
go.Surface(
x=xx,
y=yy,
z=z.reshape(xx.shape),
opacity=0.8 * alpha,
colorscale=[[0, c], [1, c]],
name='experimental variogram'
)
)
elif kind == 'scatter' or kwargs.get('add_points', False):
fig.add_trace(
go.Scatter3d(
x=xx.flatten(),
y=yy.flatten(),
z=z,
mode='markers',
opacity=alpha,
marker=dict(color=c, size=kwargs.get('size', 4)),
name='experimental variogram'
)
)
# add the model
if not kwargs.get('no_model', False):
fig.add_trace(
go.Surface(
x=_xx,
y=_yy,
z=_z.reshape(_xx.shape),
                opacity=min(1, alpha * 1.2),
colorscale=cmap,
name='%s model' % stvariogram.model.__name__
)
)
# set some labels
fig.update_layout(scene=dict(
xaxis_title='space',
yaxis_title='time',
zaxis_title='semivariance [%s]' % stvariogram.estimator.__name__
))
# return
return fig
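
# Illustrative usage sketch: these backends are normally dispatched through the
# variogram's plotting interface; `stv` below stands for a fitted
# skgstat.SpaceTimeVariogram instance (a placeholder, not defined in this module).
def _example_backend_usage(stv):
    fig_mpl = matplotlib_plot_3d(stv, kind='surf', elev=25, azim=200)
    fig_plotly = plotly_plot_3d(stv, kind='scatter')
    return fig_mpl, fig_plotly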
| mit |
mjvakili/ccppabc | ccppabc/code/test_data.py | 1 | 4243 | '''
Test the data.py module
'''
import numpy as np
import matplotlib.pyplot as plt
import util
import data as Data
# --- Halotools ---
from halotools.empirical_models import PrebuiltHodModelFactory
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
def PlotCovariance(obvs, Mr=21, b_normal=0.25, inference='mcmc'):
''' Plot the covariance matrix for a specified obvs
'''
# import the covariance matrix
covar = Data.data_cov(Mr=Mr, b_normal=b_normal, inference=inference)
if obvs == 'xi':
obvs_cov = covar[1:16 , 1:16]
r_bin = Data.xi_binedges()
elif obvs == 'gmf':
obvs_cov = covar[17:, 17:]
binedges = Data.data_gmf_bins()
r_bin = 0.5 * (binedges[:-1] + binedges[1:])
n_bin = int(np.sqrt(obvs_cov.size))
# calculate the reduced covariance for plotting
red_covar = np.zeros([n_bin, n_bin])
for ii in range(n_bin):
for jj in range(n_bin):
red_covar[ii][jj] = obvs_cov[ii][jj]/np.sqrt(obvs_cov[ii][ii] * obvs_cov[jj][jj])
prettyplot()
fig = plt.figure()
sub = fig.add_subplot(111)
cont = sub.pcolormesh(r_bin, r_bin, red_covar, cmap=plt.cm.afmhot_r)
plt.colorbar(cont)
sub.set_xlim([r_bin[0], r_bin[-1]])
sub.set_ylim([r_bin[0], r_bin[-1]])
sub.set_xscale('log')
sub.set_yscale('log')
sub.set_xlabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
sub.set_ylabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
fig_file = ''.join([util.fig_dir(),
obvs.upper(), 'covariance',
'.Mr', str(Mr),
'.bnorm', str(round(b_normal,2)),
'.', inference, '_inf.png'])
fig.savefig(fig_file, bbox_inches='tight')
plt.close()
return None
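# Note: the "reduced covariance" built above is simply the correlation matrix;
# an equivalent vectorized computation (sketch) would be
#     d = np.sqrt(np.diag(obvs_cov))
#     red_covar = obvs_cov / np.outer(d, d)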
# ---- Plotting ----
def xi(Mr=20, Nmock=500):
'''
Plot xi(r) of the fake observations
'''
prettyplot()
pretty_colors = prettycolors()
xir, cii = Data.data_xi(Mr=Mr, Nmock=Nmock)
rbin = Data.data_xi_bins(Mr=Mr)
fig = plt.figure(1)
sub = fig.add_subplot(111)
sub.plot(rbin, rbin*xir, c='k', lw=1)
sub.errorbar(rbin, rbin*xir, yerr = rbin*cii**0.5 , fmt="ok", ms=1, capsize=2, alpha=1.)
sub.set_xlim([0.1, 15])
sub.set_ylim([1, 10])
sub.set_yscale("log")
sub.set_xscale("log")
sub.set_xlabel(r'$\mathtt{r}\; (\mathtt{Mpc})$', fontsize=25)
sub.set_ylabel(r'$\mathtt{r} \xi_{\rm gg}$', fontsize=25)
fig_file = ''.join([util.fig_dir(),
'xi.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
fig.savefig(fig_file, bbox_inches='tight')
plt.close()
return None
def gmf(Mr=20, Nmock=500):
'''
Plot Group Multiplicty Function of fake observations
'''
prettyplot()
pretty_colors = prettycolors()
# import fake obs GMF
gmf, sig_gmf = Data.data_gmf(Mr=Mr, Nmock=Nmock)
# group richness bins
gmf_bin = Data.data_gmf_bins()
fig = plt.figure(1)
sub = fig.add_subplot(111)
sub.errorbar(
0.5*(gmf_bin[:-1]+gmf_bin[1:]), gmf, yerr=sig_gmf,
fmt="ok", capsize=1.0
)
sub.set_xlim([1, 60])
sub.set_yscale('log')
sub.set_ylabel(r"Group Multiplicity Function (h$^{3}$ Mpc$^{-3}$)", fontsize=20)
sub.set_xlabel(r"$\mathtt{Group\;\;Richness}$", fontsize=20)
# save to file
fig_file = ''.join([util.fig_dir(),
'gmf.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
fig.savefig(fig_file, bbox_inches='tight')
return None
# ---- tests -----
def xi_binning_tests(Mr=20):
model = PrebuiltHodModelFactory('zheng07', threshold = -1.0*np.float(Mr))
rbins = np.concatenate([np.array([0.1]), np.logspace(np.log10(0.5), np.log10(20.), 15)])
print 'R bins = ', rbins
for ii in xrange(10):
        model.populate_mock() # populate mock realization
#rbins = np.logspace(-1, np.log10(20.), 16)
r_bin, xi_r = model.mock.compute_galaxy_clustering(rbins=rbins)
print xi_r
def test_nbar(Mr=21, b_normal=0.25):
print Data.data_nbar(Mr=Mr, b_normal=b_normal)
if __name__=='__main__':
PlotCovariance('gmf', inference='mcmc')
#test_nbar()
#xi_cov(Mr=20, Nmock=500)
#xi_binning_tests(Mr=20)
| mit |
Andreea-G/Codds_DarkMatter | src/experiment_HaloIndep_Band.py | 1 | 59260 | """
Copyright (c) 2015 Andreea Georgescu
Created on Wed Mar 4 00:47:37 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO! This only works for CDMSSi!
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from experiment_HaloIndep import *
import interp_uniform as unif
# from interp import interp1d
from scipy import interpolate
from scipy.optimize import brentq, minimize
from basinhopping import *
import matplotlib.pyplot as plt
import os # for speaking
import parallel_map as par
DEBUG = F
DEBUG_FULL = F
USE_BASINHOPPING = T
ADAPT_KWARGS = F
ALLOW_MOVE = T
class ConstraintsFunction(object):
""" Class to implement the constraints function that will be passed as an argunent
to the minimization routines.
Input:
args: Arguments needed for calculating the constraints:
vminStar, logetaStar, vminStar_index
"""
def __init__(self, *args):
self.vminStar = args[0]
self.logetaStar = args[1]
self.vminStar_index = args[2]
self.vmin_max = 2000
def __call__(self, x, close=True):
"""
Input:
x: ndarray
Returns:
constraints: ndarray
Constraints vector, where each value must be >= 0 for the
                constraint to be satisfied. Contains:
0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
13 - 15: vminStar_index: x.size/2 constraints = 3 for x.size/2 = 3
16 - 18: vminStar and logetaStar: x.size/2 constraints = 3 for x.size/2 = 3
"""
constraints = np.concatenate([x[:x.size/2], self.vmin_max - x[:x.size/2], -x[x.size/2:],
np.diff(x[:x.size/2]), np.diff(-x[x.size/2:]),
(x[:x.size/2] - self.vminStar) * (-x[x.size/2:] + self.logetaStar),
self.vminStar - x[:self.vminStar_index],
x[self.vminStar_index: x.size/2] - self.vminStar,
x[x.size/2: x.size/2 + self.vminStar_index] - self.logetaStar,
self.logetaStar - x[x.size/2 + self.vminStar_index:]])
if close:
is_not_close = np.logical_not(np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
is_not_close[:3 * (x.size/2)] = True
constraints = np.where(is_not_close, constraints, np.abs(constraints))
if np.any(np.isnan(constraints)):
raise ValueError
return constraints
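
# Usage note (sketch, schematic names): ConstraintsFunction is meant to serve as
# an inequality constraint for scipy.optimize.minimize or basinhopping, mirroring
# the calls made inside Experiment_EHI below, e.g.
#
#     constr_func = ConstraintsFunction(vminStar, logetaStar, vminStar_index)
#     constr = ({'type': 'ineq', 'fun': constr_func})
#     minimize(minus_loglike, vars_guess, constraints=constr, method='SLSQP')
#
# where vars_guess stacks the vmin steps followed by the logeta steps, as
# assumed by the x[:x.size/2] / x[x.size/2:] indexing above.
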
class Experiment_EHI(Experiment_HaloIndep):
""" Class implementing the extended maximum likelihood halo-independent (EHI)
method to obtain the confidence band for experiments with potential signals and
unbinned data (arXiv:1507.03902).
Input:
expername: string
The name of the experiment.
scattering_type: string
The type of scattering. Can be
- 'SI' (spin-independent)
- 'SDAV' (spin-dependent, axial-vector)
- 'SDPS' (spin-dependent, pseudo-scalar)
mPhi: float, optional
The mass of the mediator.
method: str, optional
Type of minimization solver to be passed as a parameter to the minimization
routine. Can be 'SLSQP' or 'COBYLA'.
"""
def __init__(self, expername, scattering_type, mPhi=mPhiRef, method='SLSQP'):
super().__init__(expername, scattering_type, mPhi)
module = import_file(INPUT_DIR + expername + ".py")
self.ERecoilList = module.ERecoilList
self.mu_BKG_i = module.mu_BKG_i
self.NBKG = module.NBKG
self.method = method
def _VMinSortedList(self, mx, fp, fn, delta):
""" Computes the list of vmin corresponsing to measured recoil energies,
sorted in increasing order. Will be useful as starting guesses.
"""
self.vmin_sorted_list = np.sort(VMin(self.ERecoilList, self.mT[0], mx, delta))
return
def ResponseTables(self, vmin_min, vmin_max, vmin_step, mx, fp, fn, delta,
output_file_tail):
""" Computes response tables
- self.diff_response_tab is a table of [vmin, DifferentialResponse(Eee_i)]
pairs for each vmin in the range [vminmin, vminmax], corresponding to measured
recoil energies Eee_i. It is a 3D matrix where
axis = 0 has dimension self.ERecoilList.size()
axis = 1 has dimension vmin_list.size() + 1 (where + 1 is because we
prepend zeros for vmin = 0)
axis = 2 has dimension 2 for the pairs of [vmin, diff_response].
- self.response_tab is a table of [vmin, Response] pairs for each vmin
in the range [vminmin, vminmax], corresponding to DifferentialResponse
integrated over the full energy range. It is a 2D matrix where
                axis = 0 has dimension vmin_list.size() + 1 (where +1 is because we
prepend zeros for vmin = 0)
                axis = 1 has dimension 2 for the pairs of [vmin, response].
Input:
vmin_min, vmin_max, vmin_step: float
Vmin range and vmin step size.
mx, fp, fn, delta: float
output_file_tail: string
Tag to be added to the file name since the results for
self.vmin_sorted_list, self.diff_response_tab and self.response_tab
are each written to files.
"""
self._VMinSortedList(mx, fp, fn, delta)
file = output_file_tail + "_VminSortedList.dat"
print(file)
np.savetxt(file, self.vmin_sorted_list)
if delta == 0:
branches = [1]
else:
branches = [1, -1]
self.vmin_linspace = np.linspace(vmin_min, vmin_max,
(vmin_max - vmin_min)/vmin_step + 1)
self.diff_response_tab = np.zeros((self.ERecoilList.size, 1))
self.response_tab = np.zeros(1)
self.curly_H_tab = np.zeros((self.ERecoilList.size, 1))
self.xi_tab = np.zeros(1)
xi = 0
vmin_prev = 0
for vmin in self.vmin_linspace:
print("vmin =", vmin)
diff_resp_list = np.zeros((1, len(self.ERecoilList)))
resp = 0
curly_H = np.zeros((1, len(self.ERecoilList)))
for sign in branches:
(ER, qER, const_factor) = self.ConstFactor(vmin, mx, fp, fn, delta, sign)
v_delta = min(VminDelta(self.mT, mx, delta))
diff_resp_list += np.array([self.DifferentialResponse(Eee, qER, const_factor)
for Eee in self.ERecoilList])
resp += integrate.quad(self.DifferentialResponse, self.Ethreshold, self.Emaximum,
args=(qER, const_factor), epsrel=PRECISSION, epsabs=0)[0]
curly_H += np.array([[integrate.quad(self.DifferentialResponse_Full, v_delta, vmin,
args=(Eee, mx, fp, fn, delta, sign),
epsrel=PRECISSION, epsabs=0)[0]
for Eee in self.ERecoilList]])
xi += self.Exposure * \
self.IntegratedResponse(vmin_prev, vmin,
self.Ethreshold, self.Emaximum,
mx, fp, fn, delta)
vmin_prev = vmin
self.diff_response_tab = \
np.append(self.diff_response_tab, diff_resp_list.transpose(), axis=1)
self.response_tab = np.append(self.response_tab, [resp], axis=0)
self.curly_H_tab = np.append(self.curly_H_tab, curly_H.transpose(), axis=1)
# counts/kg/keVee
self.xi_tab = np.append(self.xi_tab, [xi], axis=0)
# counts * day
self.vmin_linspace = np.insert(self.vmin_linspace, 0., 0)
file = output_file_tail + "_VminLinspace.dat"
print(file)
np.savetxt(file, self.vmin_linspace)
file = output_file_tail + "_DiffRespTable.dat"
print(file)
np.savetxt(file, self.diff_response_tab)
file = output_file_tail + "_RespTable.dat"
print(file)
np.savetxt(file, self.response_tab)
file = output_file_tail + "_CurlyHTable.dat"
print(file)
np.savetxt(file, self.curly_H_tab)
file = output_file_tail + "_XiTable.dat"
print(file)
np.savetxt(file, self.xi_tab)
os.system("say Finished response tables.")
return
def PlotTable(self, func, dimension=0, xlim=None, ylim=None,
title=None, plot_close=True, plot_show=True, show_zero_axis=False):
""" Plots response tables.
Input:
func: callable
Function or list of functions of v that should be plotted.
dimension: int
0 (if there's only one function) or
1 (if there are a list of functions).
xlim, ylim: float
Axis limits for the plots.
title: string
Plot title.
plot_close, plot_show: bool
Whether to call plt.close() before and plt.show() after.
show_zero_axis: bool
Whether to show a horizontal line at zero.
"""
if plot_close:
plt.close()
if dimension == 0:
# only one function
plt.plot(self.vmin_linspace, np.array([func(v)
for v in self.vmin_linspace]))
elif dimension == 1:
# list of interpolated functions for each energy in self.ERecoilList
for i in range(self.ERecoilList.size):
plt.plot(self.vmin_linspace, np.array([func[i](v)
for v in self.vmin_linspace]))
else:
print("Wrong dimension")
raise TypeError
if show_zero_axis:
plt.plot(self.vmin_linspace, np.zeros(self.vmin_linspace.size))
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if title is not None:
plt.title(title)
if plot_show:
plt.show()
def ImportResponseTables(self, output_file_tail, plot=True):
""" Imports the data for the response tables from files.
"""
file = output_file_tail + "_VminSortedList.dat"
with open(file, 'r') as f_handle:
self.vmin_sorted_list = np.loadtxt(f_handle)
file = output_file_tail + "_VminLinspace.dat"
with open(file, 'r') as f_handle:
self.vmin_linspace = np.loadtxt(f_handle)
file = output_file_tail + "_DiffRespTable.dat"
with open(file, 'r') as f_handle:
self.diff_response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_RespTable.dat"
with open(file, 'r') as f_handle:
self.response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_CurlyHTable.dat"
with open(file, 'r') as f_handle:
self.curly_H_tab = np.loadtxt(f_handle)
file = output_file_tail + "_XiTable.dat"
with open(file, 'r') as f_handle:
self.xi_tab = np.loadtxt(f_handle)
self.diff_response_interp = np.array([unif.interp1d(self.vmin_linspace, dr)
for dr in self.diff_response_tab])
self.response_interp = unif.interp1d(self.vmin_linspace, self.response_tab)
self.curly_H_interp = np.array([unif.interp1d(self.vmin_linspace, h)
for h in self.curly_H_tab])
if plot:
self.PlotTable(self.diff_response_interp, dimension=1)
self.PlotTable(self.response_interp, dimension=0)
self.PlotTable(self.curly_H_interp, dimension=1, title='Curly H')
return
def VminIntegratedResponseTable(self, vmin_list):
return np.array([[integrate.quad(self.diff_response_interp[i],
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)]
for i in range(self.ERecoilList.size)])
def IntegratedResponseTable(self, vmin_list):
return np.array([integrate.quad(self.response_interp,
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)])
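    # Note: for each interval [vmin_a, vmin_{a+1}] of the piecewise-constant eta
    # function, the two tables above hold the integral of the differential
    # response at each observed energy (VminIntegratedResponseTable) and of the
    # total response (IntegratedResponseTable); dotted with 10**logeta in
    # _MinusLogLikelihood they give the expected rates mu_i and Nsignal.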
def _MinusLogLikelihood(self, vars_list, vminStar=None, logetaStar=None,
vminStar_index=None):
""" Compute -log(L)
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)]
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
Returns:
-log(L): float
"""
if vminStar is None:
vmin_list_w0 = vars_list[: vars_list.size/2]
logeta_list = vars_list[vars_list.size/2:]
else:
vmin_list_w0 = np.insert(vars_list[: vars_list.size/2],
vminStar_index, vminStar)
logeta_list = np.insert(vars_list[vars_list.size/2:],
vminStar_index, logetaStar)
vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
vmin_resp_integr = self.VminIntegratedResponseTable(vmin_list_w0)
resp_integr = self.IntegratedResponseTable(vmin_list_w0)
mu_i = self.Exposure * np.dot(vmin_resp_integr, 10**logeta_list)
Nsignal = self.Exposure * np.dot(10**logeta_list, resp_integr)
if vminStar is None:
self.gamma_i = (self.mu_BKG_i + mu_i) / self.Exposure
# counts/kg/keVee/days
result = self.NBKG + Nsignal - np.log(self.mu_BKG_i + mu_i).sum()
if np.any(self.mu_BKG_i + mu_i < 0):
raise ValueError
return result
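    # For reference: the value returned above is the extended (unbinned) negative
    # log-likelihood up to parameter-independent constants,
    #     -log L = N_BKG + N_signal - sum_i log(mu_BKG_i + mu_i),
    # where mu_i is the expected signal contribution at the i-th observed event.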
def MinusLogLikelihood(self, vars_list, constr_func=None, vminStar=None,
logetaStar=None, vminStar_index=None):
""" Computes -log(L) and tests whether constraints are satisfied.
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)].
constr_func: callable, optional
                Function of vars_list giving an array of values, each corresponding to
a constraint. If the values are > 0 the constraints are satisfied.
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
vminStar_index: int, optional
Index corresponding to the position of vminStar in the array of vmin
steps.
Returns:
-log(L) if all constraints are valid, and the result of an artificial
function that grows with the invalid constraints if not all constraints
are valid.
"""
constraints = constr_func(vars_list)
constr_not_valid = constraints < 0
if DEBUG_FULL:
print("*** vars_list =", repr(vars_list))
if DEBUG_FULL:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("constraints =", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
try:
return self._MinusLogLikelihood(vars_list, vminStar=vminStar,
logetaStar=logetaStar,
vminStar_index=vminStar_index)
except:
if np.any(constr_not_valid):
constr_list = constraints[constr_not_valid]
if DEBUG_FULL:
print("Constraints not valid!!")
print("constr sum =", -constr_list.sum())
return min(max(-constr_list.sum(), 0.001) * 1e6, 1e6)
else:
print("Error!!")
raise
def OptimalLikelihood(self, output_file_tail, logeta_guess):
""" Finds the best-fit piecewise constant eta function corresponding to the
minimum MinusLogLikelihood, and prints the results to file (value of the minimum
        MinusLogLikelihood and the corresponding values of the vmin and logeta steps).
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_guess: float
Guess for the value of log(eta) in the minimization procedure.
"""
self.ImportResponseTables(output_file_tail, plot=False)
vars_guess = np.append(self.vmin_sorted_list,
logeta_guess * np.ones(self.vmin_sorted_list.size))
print("vars_guess =", vars_guess)
vmin_max = self.vmin_linspace[-1]
def constr_func(x, vmin_max=vmin_max):
""" 0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
"""
constraints = np.concatenate([x[:x.size/2], vmin_max - x[:x.size/2],
-x[x.size/2:],
np.diff(x[:x.size/2]), np.diff(-x[x.size/2:])])
is_not_close = np.logical_not(
np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
is_not_close[:3 * (x.size/2)] = T
constr = np.where(is_not_close, constraints, np.abs(constraints))
if DEBUG:
print("***constr =", repr(constr))
print("tf =", repr(constr < 0))
return constr
constr = ({'type': 'ineq', 'fun': constr_func})
np.random.seed(0)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": (constr_func,)}
optimum_log_likelihood = basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs,
niter=30, stepsize=0.1)
else:
optimum_log_likelihood = minimize(self.MinusLogLikelihood, vars_guess,
args=(constr_func,), constraints=constr)
print(optimum_log_likelihood)
print("MinusLogLikelihood =", self._MinusLogLikelihood(optimum_log_likelihood.x))
print("vars_guess =", repr(vars_guess))
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
print(file)
np.savetxt(file, np.append([optimum_log_likelihood.fun],
optimum_log_likelihood.x))
os.system("say 'Finished finding optimum'")
return
def ImportOptimalLikelihood(self, output_file_tail, plot=False):
""" Import the minumum -log(L) and the locations of the steps in the best-fit
logeta function.
Input:
output_file_tail: string
Tag to be added to the file name.
plot: bool, optional
Whether to plot response tables.
"""
self.ImportResponseTables(output_file_tail, plot=False)
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
with open(file, 'r') as f_handle:
optimal_result = np.loadtxt(f_handle)
self.optimal_logL = optimal_result[0]
self.optimal_vmin = optimal_result[1: optimal_result.size/2 + 1]
self.optimal_logeta = optimal_result[optimal_result.size/2 + 1:]
print("optimal result =", optimal_result)
if plot:
self._MinusLogLikelihood(optimal_result[1:]) # to get self.gamma_i
self.xi_interp = unif.interp1d(self.vmin_linspace, self.xi_tab)
self.h_sum_tab = np.sum([self.curly_H_tab[i] / self.gamma_i[i]
for i in range(self.optimal_vmin.size)], axis=0)
self.q_tab = 2 * (self.xi_tab - self.h_sum_tab)
self.h_sum_interp = unif.interp1d(self.vmin_linspace, self.h_sum_tab)
self.q_interp = unif.interp1d(self.vmin_linspace, self.q_tab)
file = output_file_tail + "_HSumTable.dat"
print(file)
np.savetxt(file, self.h_sum_tab)
file = output_file_tail + "_QTable.dat"
print(file)
np.savetxt(file, self.q_tab)
self.PlotTable(self.xi_interp, dimension=0, plot_show=False)
self.PlotTable(self.h_sum_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='Xi, H_sum', plot_close=False)
self.PlotTable(self.q_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='q', show_zero_axis=True)
return
def _PlotStepFunction(self, vmin_list, logeta_list,
xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
mark=None, color=None, linewidth=1,
plot_close=True, plot_show=True):
""" Plots a step-like function, given the location of the steps.
"""
if plot_close:
plt.close()
print(vmin_list)
print(logeta_list)
x = np.append(np.insert(vmin_list, 0, 0), vmin_list[-1] + 0.1)
y = np.append(np.insert(logeta_list, 0, logeta_list[0]), -80)
if color is not None:
plt.step(x, y, color=color, linewidth=linewidth)
if mark is not None:
plt.plot(x, y, mark, color=color)
else:
plt.step(x, y, linewidth=linewidth)
if mark is not None:
plt.plot(x, y, mark)
# plt.xlim([vmin_list[0] * xlim_percentage[0], vmin_list[-1] * xlim_percentage[1]])
plt.xlim([0, 1000])
plt.ylim([max(logeta_list[-1] * ylim_percentage[0], -60),
max(logeta_list[0] * ylim_percentage[1], -35)])
if plot_show:
plt.show()
return
def PlotOptimum(self, xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
color='red', linewidth=1,
plot_close=True, plot_show=True):
""" Plots the best-fit eta(vmin) step function.
"""
self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
xlim_percentage=xlim_percentage,
ylim_percentage=ylim_percentage,
color=color, linewidth=linewidth,
plot_close=plot_close, plot_show=plot_show)
return
def PlotConstrainedOptimum(self, vminStar, logetaStar, vminStar_index,
xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
plot_close=True, plot_show=True):
""" Plots the eta(vmin) function given the location of vminStar and logetaStar.
"""
self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
plot_close=plot_close, plot_show=False)
x = np.insert(self.constr_optimal_vmin, vminStar_index, vminStar)
y = np.insert(self.constr_optimal_logeta, vminStar_index, logetaStar)
self._PlotStepFunction(x, y,
xlim_percentage=xlim_percentage,
ylim_percentage=ylim_percentage,
plot_close=False, plot_show=False, mark='x', color='k')
plt.plot(vminStar, logetaStar, '*')
if plot_show:
plt.show()
return
def _ConstrainedOptimalLikelihood(self, vminStar, logetaStar, vminStar_index):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar and vminStar_index.
Input:
vminStar, logetaStar: float
Location of the constrained step.
vminStar_index: int
Index of vminStar in the list of vmin steps of the constrained optimum
logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
if DEBUG:
print("~~~~~ vminStar_index =", vminStar_index)
vmin_guess_left = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] < vminStar
else vminStar * (1 - 0.001*(vminStar_index - ind))
for ind in range(vminStar_index)])
vmin_guess_right = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] > vminStar
else vminStar * (1 + 0.001*(ind - vminStar_index - 1))
for ind in range(vminStar_index, self.optimal_vmin.size)])
vmin_guess = np.append(vmin_guess_left, vmin_guess_right)
logeta_guess = self.optimal_logeta
logeta_guess_left = np.maximum(logeta_guess[:vminStar_index],
np.ones(vminStar_index)*logetaStar)
logeta_guess_right = np.minimum(logeta_guess[vminStar_index:],
np.ones(logeta_guess.size - vminStar_index) *
logetaStar)
logeta_guess = np.append(logeta_guess_left, logeta_guess_right)
vars_guess = np.append(vmin_guess, logeta_guess)
constr_func = ConstraintsFunction(vminStar, logetaStar, vminStar_index)
constr = ({'type': 'ineq', 'fun': constr_func})
args = (constr_func, vminStar, logetaStar, vminStar_index)
sol_not_found = True
attempts = 3
np.random.seed(1)
random_variation = 1e-5
if USE_BASINHOPPING:
class TakeStep(object):
def __init__(self, stepsize=0.1):
pass
self.stepsize = stepsize
def __call__(self, x):
x[:x.size/2] += np.random.uniform(-5. * self.stepsize,
5. * self.stepsize,
x[x.size/2:].shape)
x[x.size/2:] += np.random.uniform(-self.stepsize,
self.stepsize, x[x.size/2:].shape)
return x
take_step = TakeStep()
class AdaptiveKwargs(object):
def __init__(self, kwargs, random_variation=random_variation):
self.kwargs = kwargs
self.random_variation = random_variation
def __call__(self):
new_kwargs = {}
random_factor_vminStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
random_factor_logetaStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
constr_func_args = (self.kwargs['args'][1] * random_factor_vminStar,
self.kwargs['args'][2] * random_factor_logetaStar,
self.kwargs['args'][3])
constr_func = ConstraintsFunction(*constr_func_args)
new_kwargs['args'] = (constr_func,) + constr_func_args
new_kwargs['constraints'] = ({'type': 'ineq', 'fun': constr_func})
if 'method' in self.kwargs:
new_kwargs['method'] = self.kwargs['method']
return new_kwargs
minimizer_kwargs = {"constraints": constr, "args": args, "method": self.method}
if ADAPT_KWARGS:
adapt_kwargs = AdaptiveKwargs(minimizer_kwargs, random_variation)
else:
adapt_kwargs = None
while sol_not_found and attempts > 0:
try:
if USE_BASINHOPPING:
constr_optimum_log_likelihood = \
basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs, niter=5,
take_step=take_step, adapt_kwargs=adapt_kwargs,
stepsize=0.2)
else:
constr_optimum_log_likelihood = \
minimize(self.MinusLogLikelihood, vars_guess,
args=args, constraints=constr, method=self.method)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
except ValueError:
sol_not_found = True
pass
attempts -= 1
args = (constr_func,
vminStar * (1 + random_variation * np.random.uniform(-1, 1)),
logetaStar * (1 + random_variation * np.random.uniform(-1, 1)),
vminStar_index)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": args}
if DEBUG and sol_not_found:
print(attempts, "attempts left! ####################################" +
"################################################################")
print("sol_not_found =", sol_not_found)
if sol_not_found:
if DEBUG:
print("ValueError: sol not found")
raise ValueError
if DEBUG:
print(constr_optimum_log_likelihood)
print("kwargs =", constr_optimum_log_likelihood.minimizer.kwargs)
print("args =", constr_optimum_log_likelihood.minimizer.kwargs['args'])
print("optimum_logL =", self.optimal_logL)
print("constraints=", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
print("vars_guess =", repr(vars_guess))
print("optimum_logL =", self.optimal_logL)
print("vminStar_index =", vminStar_index)
return constr_optimum_log_likelihood
def ConstrainedOptimalLikelihood(self, vminStar, logetaStar, plot=False):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar. Finds the minimum for all vminStar_index, and picks the best one.
Input:
vminStar, logetaStar: float
Location of constrained step.
plot: bool, optional
                Whether to plot the constrained piecewise-constant logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
vminStar_index = 0
while vminStar_index < self.optimal_vmin.size and \
vminStar > self.optimal_vmin[vminStar_index]:
vminStar_index += 1
try:
constr_optimum_log_likelihood = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, vminStar_index)
except ValueError:
optim_logL = 10**6
pass
else:
optim_logL = constr_optimum_log_likelihood.fun
original_optimum = constr_optimum_log_likelihood
vminStar_index_original = vminStar_index
index = vminStar_index
while ALLOW_MOVE and index > 0:
try:
index -= 1
new_optimum = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved left")
print("Moved left, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
index = vminStar_index_original
while ALLOW_MOVE and index < self.optimal_vmin.size:
try:
index += 1
new_optimum = self._ConstrainedOptimalLikelihood(vminStar, logetaStar,
index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved right")
print("Moved right, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
if optim_logL == 10**6:
raise ValueError
self.constr_optimal_logl = constr_optimum_log_likelihood.fun
vars_result = constr_optimum_log_likelihood.x
self.constr_optimal_vmin = vars_result[: vars_result.size/2]
self.constr_optimal_logeta = vars_result[vars_result.size/2:]
if plot:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("vminStar_index =", vminStar_index)
try:
print("original:", original_optimum)
except:
print("Original failed.")
pass
try:
print("new:", constr_optimum_log_likelihood)
print(constr_optimum_log_likelihood.minimizer.kwargs['args'])
except:
print("All attepts failed.")
pass
try:
vminStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][1]
logetaStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][2]
constr_func = ConstraintsFunction(vminStar_rand, logetaStar_rand,
vminStar_index)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
print("random vminStar =", vminStar_rand)
print("random logetaStar =", logetaStar_rand)
print("x =", constr_optimum_log_likelihood.x)
print("constraints =", constraints)
print("is_not_close =", is_not_close)
print("constr_not_valid =", constr_not_valid)
print("sol_not_found =", sol_not_found)
except:
print("Error")
pass
os.system("say 'Finished plot'")
self.PlotConstrainedOptimum(vminStar_rand, logetaStar_rand, vminStar_index,
xlim_percentage=(0., 1.1),
ylim_percentage=(1.2, 0.8))
return self.constr_optimal_logl
def VminSamplingList(self, output_file_tail, vmin_min, vmin_max, vmin_num_steps,
steepness_vmin=1.5, steepness_vmin_center=2.5, plot=False):
""" Finds a non-linear way to sample the vmin range, such that more points are
sampled near the location of the steps of the best-fit logeta function, and
fewer in between. This is done by building a function of vmin that is steeper
near the steps and flatter elsewhere, and the steeper this function the more
samplings are done in this region.
Input:
output_file_tail: string
Tag to be added to the file name.
vmin_min, vmin_max: float
Range in vmin where the sampling should be made.
vmin_num_steps: int
Number of samples in vmin (approximate, the final number of steps is
                not exact, due to taking floor() in some places).
steepness_vmin: float, optional
Parameter related to the steepness of this function to the left of the
leftmost step and to the right of the rightmost step.
steepness_vmin_center: float, optional
Similar parameter, but for the steepness in between the leftmost step
and the rightmost step.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
self.ImportOptimalLikelihood(output_file_tail)
xmin = vmin_min
xmax = vmin_max
# TODO! This +4 is to compensate for a loss of ~4 points (not always 4 though),
# and it's due to taking floor later on.
# Find a better way to deal with this.
x_num_steps = vmin_num_steps # + 4
s = steepness_vmin
sc = steepness_vmin_center
x_lin = np.linspace(xmin, xmax, 1000)
x0_list = self.optimal_vmin
numx0 = x0_list.size
print("x0 =", x0_list)
def UnitStep(x): return (np.sign(x) + 1) / 2
def g1(x, x0, s0, xmin=xmin):
return np.log10(UnitStep(x - x0) +
UnitStep(x0 - x) *
(x0 - xmin) / (x + 10**s0 * (-x + x0) - xmin))
def g2(x, x0, s0, xmax=xmax):
return np.log10(UnitStep(x0 - x) +
UnitStep(x - x0) *
(x + 10**s0 * (-x + x0) - xmax) / (x0 - xmax))
def g(x, x0, s1, s2): return g1(x, x0, s1) + g2(x, x0, s2)
s_list = np.array([[s, sc]] + [[sc, sc]] * (numx0 - 2) + [[sc, s]])
def g_total(x, sign=1, x0=x0_list, s_list=s_list):
return np.array([sign * g(x, x0_list[i], s_list[i, 0], s_list[i, 1])
for i in range(x0_list.size)]).prod(axis=0)
g_lin = g_total(x_lin)
xT_guess = (x0_list[:-1] + x0_list[1:]) / 2
bounds = np.array([(x0_list[i], x0_list[i + 1])
for i in range(x0_list.size - 1)])
x_turns_max = np.array([minimize(g_total, np.array(xT_guess[i]),
args=(-1,), bounds=[bounds[i]]).x
for i in range(0, xT_guess.size, 2)])
x_turns_min = np.array([minimize(g_total, np.array(xT_guess[i]),
bounds=[bounds[i]]).x
for i in range(1, xT_guess.size, 2)])
x_turns = np.sort(np.append(x_turns_max, x_turns_min))
x_turns = np.append(np.insert(x_turns, 0, xmin), [xmax])
y_turns = g_total(x_turns)
print("x_turns =", x_turns)
print("y_turns =", y_turns)
def g_inverse(y, x1, x2):
return brentq(lambda x: g_total(x) - y, x1, x2)
def g_inverse_list(y_list, x1, x2):
return np.array([g_inverse(y, x1, x2) for y in y_list])
y_diff = np.diff(y_turns)
y_diff_sum = np.abs(y_diff).sum()
print("y_diff =", y_diff)
num_steps = np.array([max(1, np.floor(x_num_steps * np.abs(yd)/y_diff_sum))
for yd in y_diff])
print("num_steps =", num_steps)
y_list = np.array([np.linspace(y_turns[i], y_turns[i+1], num_steps[i])
for i in range(num_steps.size)])
x_list = np.array([g_inverse_list(y_list[i], x_turns[i], x_turns[i+1])
for i in range(y_list.size)])
x_list = np.concatenate(x_list)
y_list = np.concatenate(y_list)
x_list = x_list[np.array([x_list[i] != x_list[i+1]
for i in range(x_list.size - 1)] + [True])]
y_list = y_list[np.array([y_list[i] != y_list[i+1]
for i in range(y_list.size - 1)] + [True])]
self.vmin_sampling_list = x_list
if plot:
plt.close()
plt.plot(x_lin, g_lin)
plt.plot(x_turns, y_turns, 'o')
plt.plot(x_list, y_list, '*')
plt.xlim([xmin, xmax])
plt.ylim([min(-s * sc**(numx0 - 1), np.min(y_turns)),
max(s * sc**(numx0 - 1), np.max(y_turns))])
plt.show()
return
def OptimumStepFunction(self, vmin):
""" Best-fit logeta as a function of vmin for the optimal log(L).
Input:
vmin: float
Value of vmin for which to evaluate logeta.
Returns:
logeta: float
log(eta(vmin)) for the best-fit piecewise constant function.
"""
index = 0
while index < self.optimal_vmin.size and vmin > self.optimal_vmin[index]:
index += 1
if index == self.optimal_vmin.size:
return self.optimal_logeta[-1]*10
return self.optimal_logeta[index]
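    # Usage note (sketch): after ImportOptimalLikelihood() has been called, the
    # best-fit piecewise-constant eta can be evaluated on a grid with, e.g.,
    #     logeta_grid = [self.OptimumStepFunction(v) for v in self.vmin_linspace]
    # which is essentially the step function drawn by PlotOptimum().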
def VminLogetaSamplingTable(self, output_file_tail, logeta_percent_minus,
logeta_percent_plus, logeta_num_steps,
linear_sampling=True, steepness_logeta=1, plot=False):
""" Finds a non-linear way to sample both the vmin and logeta range, such that
more points are sampled near the location of the steps of the best-fit logeta
function, and fewer in between. This uses the sampling in vmin done by
VminSamplingList, and computes a non-linear sampling in logeta in a similar way
(by building a function of logeta that is steeper near the steps and flatter
elsewhere, and the steeper this function the more samplings are done in this
region).
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_percent_minus, logeta_percent_plus: float
Range in logeta where the sampling should be made, given as percentage
in the negative and positive direction of the best-fit logeta.
logeta_num_steps: int
Number of samples in logeta.
steepness_logeta: float, optional
Parameter related to the steepness of this sampling function in logeta.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
print(self.optimal_vmin)
print(self.optimal_logeta)
logeta_num_steps_minus = logeta_num_steps * \
logeta_percent_minus / (logeta_percent_minus + logeta_percent_plus)
logeta_num_steps_plus = logeta_num_steps * \
logeta_percent_plus / (logeta_percent_minus + logeta_percent_plus)
s = steepness_logeta
def f(x, xm, i, s0=s):
return (xm - x) / (10**s0 - 1) * 10**i + (10**s0 * x - xm) / (10**s0 - 1)
self.vmin_logeta_sampling_table = []
vmin_last_step = self.optimal_vmin[-1]
if linear_sampling:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
if vmin < self.optimal_vmin[0]:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
else:
if vmin < 600:
logeta_min = logeta_opt * (1 + logeta_percent_minus)
else:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - 0.5 * logeta_percent_plus)
logeta_list = [[vmin, logeta]
for logeta in np.linspace(logeta_min, logeta_max,
logeta_num_steps)]
self.vmin_logeta_sampling_table += [logeta_list]
else:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
logeta_min = logeta_opt * (1 + logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
logeta_list_minus = [[vmin, f(logeta_opt, logeta_min, i)]
for i in np.linspace(s, 0, logeta_num_steps_minus)]
logeta_list_plus = [[vmin, f(logeta_opt, logeta_max, i)]
for i in np.linspace(s / logeta_num_steps_plus, s,
logeta_num_steps_plus)]
self.vmin_logeta_sampling_table += [logeta_list_minus + logeta_list_plus]
self.vmin_logeta_sampling_table = np.array(self.vmin_logeta_sampling_table)
if plot:
self.PlotSamplingTable(plot_close=True)
return
def PlotSamplingTable(self, plot_close=False, plot_show=True, plot_optimum=True):
""" Plots the sampling points in the vmin-logeta plane.
"""
if plot_close:
plt.close()
print("sampling_size =", self.vmin_logeta_sampling_table.shape)
for tab in self.vmin_logeta_sampling_table:
plt.plot(tab[:, 0], tab[:, 1], 'o')
if plot_optimum:
self.PlotOptimum(xlim_percentage=(0.9, 1.1), ylim_percentage=(1.2, 0.8),
plot_close=False, plot_show=plot_show)
elif plot_show:
plt.show()
return
def GetLikelihoodTable(self, index, output_file_tail, logeta_index_range, extra_tail):
""" Prints to file lists of the form [logetaStar_ij, logL_ij] needed for
1D interpolation, where i is the index corresponding to vminStar_i and j is
the index for each logetaStar. Each file corresponds to a different index i.
Here only one file is written for a specific vminStar.
Input:
index: int
Index of vminStar.
output_file_tail: string
Tag to be added to the file name.
logeta_index_range: tuple
                A tuple (index0, index1) between which logetaStar will be considered.
If this is None, then the whole list of logetaStar is used.
extra_tail: string
Additional tail to be added to filenames.
"""
print('index =', index)
print('output_file_tail =', output_file_tail)
vminStar = self.vmin_logeta_sampling_table[index, 0, 0]
logetaStar_list = self.vmin_logeta_sampling_table[index, :, 1]
plot = False
if logeta_index_range is not None:
logetaStar_list = \
logetaStar_list[logeta_index_range[0]: logeta_index_range[1]]
plot = True
print("vminStar =", vminStar)
table = np.empty((0, 2))
for logetaStar in logetaStar_list:
try:
constr_opt = self.ConstrainedOptimalLikelihood(vminStar, logetaStar,
plot=plot)
except:
print("error")
os.system("say Error")
pass
else:
print("index =", index, "; vminStar =", vminStar,
"; logetaStar =", logetaStar, "; constr_opt =", constr_opt)
table = np.append(table, [[logetaStar, constr_opt]], axis=0)
# table = np.append(table, [logetaStar])
print("vminStar =", vminStar, "; table =", table)
if True:
temp_file = output_file_tail + "_" + str(index) + \
"_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
print(temp_file)
np.savetxt(temp_file, table)
return
def LogLikelihoodList(self, output_file_tail, extra_tail="", processes=None,
vmin_index_list=None, logeta_index_range=None):
""" Loops thorugh the list of all vminStar and calls GetLikelihoodTable,
which will print the likelihood tables to files.
Input:
output_file_tail: string
Tag to be added to the file name.
extra_tail: string, optional
Additional tail to be added to filenames.
processes: int, optional
Number of processes for parallel programming.
vmin_index_list: ndarray, optional
List of indices in vminStar_list for which we calculate the optimal
likelihood. If not given, the whole list of vminStars is used.
logeta_index_range: tuple, optional
                A tuple (index0, index1) between which logetaStar will be considered.
If not given, then the whole list of logetaStar is used.
"""
if vmin_index_list is None:
vmin_index_list = range(0, self.vmin_logeta_sampling_table.shape[0])
else:
try:
len(vmin_index_list)
except TypeError:
vmin_index_list = range(vmin_index_list,
self.vmin_logeta_sampling_table.shape[0])
print("vmin_index_list =", vmin_index_list)
print("logeta_index_range =", logeta_index_range)
kwargs = ({'index': index,
'output_file_tail': output_file_tail,
'logeta_index_range': logeta_index_range,
'extra_tail': extra_tail}
for index in vmin_index_list)
par.parmap(self.GetLikelihoodTable, kwargs, processes)
return
def _logL_interp(vars_list, constraints):
constr_not_valid = constraints(vars_list)[:-1] < 0
if np.any(constr_not_valid):
constr_list = constraints(vars_list)[constr_not_valid]
return -constr_list.sum() * 10**2
return logL_interp(vars_list)
def ConfidenceBand(self, output_file_tail, delta_logL, interpolation_order,
extra_tail="", multiplot=True):
""" Compute the confidence band.
Input:
output_file_tail: string
Tag to be added to the file name.
delta_logL: float
Target difference between the constrained minimum and the
unconstrained global minimum of MinusLogLikelihood.
interpolation_order: int
                Interpolation order for the interpolated constrained minimum of
MinusLogLikelihood as a function of logeta, for a fixed vmin.
extra_tail: string, optional
Additional tail to be added to filenames.
multiplot: bool, optional
Whether to plot log(L) as a function of logeta for each vmin, and the
horizontal line corresponding to a given delta_logL.
"""
print("self.vmin_sampling_list =", self.vmin_sampling_list)
self.vmin_logeta_band_low = []
self.vmin_logeta_band_up = []
vmin_last_step = self.optimal_vmin[-1]
if multiplot:
plt.close()
for index in range(self.vmin_sampling_list.size):
print("index =", index)
print("vmin =", self.vmin_sampling_list[index])
logeta_optim = self.OptimumStepFunction(min(self.vmin_sampling_list[index],
vmin_last_step))
file = output_file_tail + "_" + str(index) + \
"_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
try:
with open(file, 'r') as f_handle:
table = np.loadtxt(f_handle)
except:
continue
x = table[:, 0] # this is logeta
y = table[:, 1] # this is logL
logL_interp = interpolate.interp1d(x, y, kind='cubic')
def _logL_interp(vars_list, constraints):
constr_not_valid = constraints(vars_list)[:-1] < 0
if np.any(constr_not_valid):
constr_list = constraints(vars_list)[constr_not_valid]
return -constr_list.sum() * 1e2
return logL_interp(vars_list)
print(self.optimal_logL - delta_logL)
print(np.array([table[0, 0]]), " ", table[-1, 0])
print(logeta_optim)
def constr_func(logeta, logeta_min=np.array([table[0, 0]]),
logeta_max=np.array([table[-1, 0]])):
return np.concatenate([logeta - logeta_min, logeta_max - logeta])
constr = ({'type': 'ineq', 'fun': constr_func})
try:
logeta_minimLogL = minimize(_logL_interp, np.array([logeta_optim]),
args=(constr_func,), constraints=constr).x[0]
except ValueError:
print("ValueError at logeta_minimLogL")
logeta_minimLogL = logeta_optim
pass
print("logeta_minimLogL =", logeta_minimLogL)
print("x =", x)
print("y =", y)
if multiplot:
plt.close()
plt.plot(x, y, 'o-')
plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
plt.title("index =" + str(index) + ", v_min =" +
str(self.vmin_sampling_list[index]) + "km/s")
plt.xlim(x[0], x[-1])
plt.ylim(-5, 20)
plt.show()
error = F
try:
if y[0] > self.optimal_logL + delta_logL and \
logeta_minimLogL < self.optimal_logL + delta_logL:
sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
delta_logL,
table[0, 0], logeta_minimLogL)
self.vmin_logeta_band_low += \
[[self.vmin_sampling_list[index], sol]]
except ValueError:
print("ValueError: Error in calculating vmin_logeta_band_low")
error = T
try:
if y[-1] > self.optimal_logL + delta_logL and \
logeta_minimLogL < self.optimal_logL + delta_logL:
sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
delta_logL,
logeta_minimLogL, table[-1, 0])
self.vmin_logeta_band_up += \
[[self.vmin_sampling_list[index], sol]]
except ValueError:
print("ValueError: Error in calculating vmin_logeta_band_hi")
error = T
if error:
plt.close()
plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
plt.title("index =" + str(index) + "; v_min =" +
str(self.vmin_sampling_list[index]) + "km/s")
plt.xlim(x[0], x[-1])
plt.ylim([-5, 20])
plt.plot(x, y, 'o-', color="r")
plt.plot(logeta_optim, logL_interp(logeta_optim), '*')
plt.plot(logeta_optim, self.optimal_logL, '*')
print("ValueError")
plt.show()
# raise
pass
if multiplot:
plt.show()
self.vmin_logeta_band_low = np.array(self.vmin_logeta_band_low)
self.vmin_logeta_band_up = np.array(self.vmin_logeta_band_up)
print("lower band: ", self.vmin_logeta_band_low)
print("upper band: ", self.vmin_logeta_band_up)
self.PlotConfidenceBand()
delta_logL = round(delta_logL, 1)
file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + ".dat"
print(file)
np.savetxt(file, self.vmin_logeta_band_low)
file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + ".dat"
print(file)
np.savetxt(file, self.vmin_logeta_band_up)
return
def PlotConfidenceBand(self):
""" Plot the confidence band and the best-fit function.
"""
plt.close()
try:
plt.plot(self.vmin_logeta_band_low[:, 0], self.vmin_logeta_band_low[:, 1], 'o-')
except IndexError:
pass
try:
plt.plot(self.vmin_logeta_band_up[:, 0], self.vmin_logeta_band_up[:, 1], 'o-')
except IndexError:
pass
self.PlotOptimum(ylim_percentage=(1.2, 0.8), plot_close=F, plot_show=T)
def ImportConfidenceBand(self, output_file_tail, delta_logL, extra_tail=""):
""" Import the confidence band from file.
Input:
output_file_tail: string
Tag to be added to the file name.
delta_logL: float
Target difference between the constrained minimum and the
unconstrained global minimum of MinusLogLikelihood.
extra_tail: string, optional
Additional tail to be added to filenames.
"""
delta_logL = round(delta_logL, 1)
file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
print(file)
with open(file, 'r') as f_handle:
self.vmin_logeta_band_low = np.loadtxt(f_handle)
file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
with open(file, 'r') as f_handle:
self.vmin_logeta_band_up = np.loadtxt(f_handle)
return
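
# Illustrative end-to-end sketch of how the methods above are meant to be
# chained. The experiment name, output file tail and dark-matter parameters are
# placeholders, not values prescribed by this module.
def _example_confidence_band_workflow():
    exper = Experiment_EHI("CDMSSi2012", 'SI')   # hypothetical experiment name
    tail = "Output/CDMSSi_example"               # hypothetical output file tail
    mx, fp, fn, delta = 9., 1., 1., 0.           # placeholder DM parameters
    exper.ResponseTables(300, 1000, 5, mx, fp, fn, delta, tail)
    exper.OptimalLikelihood(tail, logeta_guess=-25.)
    exper.ImportOptimalLikelihood(tail)
    exper.VminSamplingList(tail, 300, 1000, 30)
    exper.VminLogetaSamplingTable(tail, 0.2, 0.2, 20)
    exper.LogLikelihoodList(tail)
    exper.ConfidenceBand(tail, delta_logL=2.7, interpolation_order=3)
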
| gpl-2.0 |
caiostringari/swantools | test.py | 1 | 2740 |
import swantools.io
import swantools.utils
import swantools.plot
import datetime
import matplotlib.pyplot as plt
import numpy as np
def readtable():
R = swantools.io.SwanIO()
P = swantools.plot.SwanPlot()
# Reading TABLE dada with headers:
df = R.read_swantable('data/table.txt')
y = df["Hsig"]
x = df.index.values
P.timeseries(x,y,"Significant Wave Heights")
def readspc():
# Reading spectral data
R = swantools.io.SwanIO()
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
P = swantools.plot.SwanPlot()
P.spcplot(freqs,dirs,times[15],spectrum[15,:,:]*factors[15])
# for t, time in enumerate(times):
# P.spcplot(freqs,dirs,times[t],spectrum[t,:,:])
def readblock(mode):
R = swantools.io.SwanIO()
P = swantools.plot.SwanPlot()
if mode == "non-stat":
# Reading a block file - Non stationary example
lon,lat,times,hs = R.read_swanblock('data/block.mat','Hsig')
P.blockplot(lon,lat,hs[0,:,:],"Non-stationary Results")
# for t, time in enumerate(times):
# P.blockplot(lon,lat,hs[t,:,:],time.strftime("%Y%m%d %H:%M"))
elif mode == "stat":
# Reading a block file - Non stationary example
lon,lat,times,hs = R.read_swanblock('data/stat_block.mat','Hsig',stat=True)
P.blockplot(lon,lat,hs,"Stationary Results")
def writescp():
# Getting some data to play with
R = swantools.io.SwanIO()
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
# Re-writing the data
R.write_spectrum("spcout.spc",lat,lon,times,freqs,dirs,factors,spectrum)
# Plot to confirm
lat,lon,freqs,dirs,times,factors,spectrum = R.read_swanspc('spcout.spc')
P = swantools.plot.SwanPlot()
for t, time in enumerate(times):
P.spcplot(freqs,dirs,times[t],spectrum[t,:,:])
def netcdf_output():
R = swantools.io.SwanIO()
W = swantools.io.Converters()
lon,lat,times,hs = R.read_swanblock('data/block.mat','Hsig')
W.np2nc("Hsig.nc",lat,lon,times,hs,"Significant Wave Height")
def spectral_output():
R = swantools.io.SwanIO()
W = swantools.io.Converters()
lon,lat,freqs,dirs,times,factors,spectrum = R.read_swanspc('data/spectrum.spc')
W.spc2nc("spectrum.nc",lat,lon,freqs,dirs,times,factors,spectrum)
if __name__ == "__main__":
# # Table data
# import seaborn as sns
# with sns.axes_style("darkgrid"):
# readtable()
# Spectral data
readspc()
# Field data
readblock("non-stat")
    # Converting block to netCDF4
netcdf_output()
    # Converting spectral file to netCDF4
spectral_output()
    # Writing spectral data
writescp()
| gpl-2.0 |
tpsatish95/OCR-on-Indus-Seals | code/Test/TextROI.py | 1 | 16306 | # -*- coding: utf-8 -*-
import skimage.io
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import selectivesearch
import numpy as np
import skimage.transform
import os
import shutil
import caffe
from PIL import Image
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
text_cut_final = set()
def getClass(FileList):
caffe.set_mode_gpu()
classifier = caffe.Classifier("../ROIs_Indus/deploy.prototxt","../ROIs_Indus/Models/bvlc_googlenet_indusnet_iter_20000.caffemodel" ,
image_dims=[224,224], raw_scale=255.0, channel_swap = [2,1,0])
inputs = [caffe.io.load_image(im_f) for im_f in FileList]
print("Classifying %d inputs." % len(inputs))
predictions = classifier.predict(inputs)
return predictions
def texbox_ext():
global text
global both_text
global text_cut_final
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in both_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1']
aw = A['x1'] + A['w'] - B['x1']
overf = 1
# if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
if A['x1'] < B['x1']: # B is right to A
aw = B['x1']+B['w'] - A['x1']
overf = 1
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# REPLACE by Cohen Suderland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
text_cut_final = text_cut_final - both_text # CHANGE THIS LINE
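# texbox_cut: clip each text box against the no-text (and "both") boxes: a box
# overlapping from the left pushes the text box's left edge rightwards, and a
# box overlapping from below trims the text box's height.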
def texbox_cut():
global no_text
no_text = no_text.union(both_text)
for x, y, w, h in text:
A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
for x1, y1, w1, h1 in no_text:
B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overf = 0
ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
if overlap_AB > 0.0:
if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
ax1 = B['x1'] + B['w']
overf = 1
if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
ah = A['h'] - (A['y1']+A['h'] - B['y1'])
overf = 1
# if A['y1'] > B['y1']: # B is top to A
# ay1 = B['y1'] + B['h']
# if A['x1'] < B['x1']: # B is right to A
# aw = A['w'] - (A['x1']+A['w'] - B['x1'])
# if A['y1'] < B['y1']: # B is bottom to A
# ah = A['h'] - (A['y1']+A['h'] - B['y1'])
# REPLACE by Cohen Suderland algo
A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
if overf == 1:
break
text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
def extend_text_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
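# draw_textbox: group classified text boxes that share roughly the same
# vertical position and height (within 25% of the mean image dimension) and
# fuse every group into a single wide text-line box.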
def draw_textbox():
global width, height
thresh = ((width+height)/2)*(0.25)
tempc = set()
for x, y, w, h in text_boxes:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
f = 0
for x1, y1, w1, h1 in text_boxes:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
f = 1
if f == 0:
text.add((x, y, w, h))
text.add(extend_text_rect(temp))
def contains(p):
x1, y1, w1, h1 = p
for x, y, w, h in candidates:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
return True
if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
candidates.remove((x, y, w, h))
return False
return False
def extend_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[3] for i in l]))
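# extend_superbox: group the final boxes whose top edge and height agree to
# within ~6% of the mean image dimension and replace each group by one
# enclosing rectangle.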
def extend_superbox():
global width, height
thresh = ((width+height)/2)*(0.06)
tempc = set()
for x, y, w, h in final:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in final:
if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
final_extended.add(extend_rect(temp))
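# draw_superbox: repeatedly merge refined boxes that overlap by at least 40%
# of either box's area into one enclosing box, recursing until no remaining
# pair of boxes overlaps.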
def draw_superbox(finals=[]):
noover = []
refinedT = []
global final
final = set()
# (x1,y1) top-left coord, (x2,y2) bottom-right coord, (w,h) size
if finals != []:
refinedT = finals
else:
refinedT = refined
remp = set(refinedT)
ref = list(refinedT)
while len(ref) > 0:
x1, y1, w1, h1 = ref[0]
if len(ref) == 1: # final box
final.add((x1, y1, w1, h1))
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
else:
ref.remove((x1, y1, w1, h1))
remp.remove((x1, y1, w1, h1))
over = set()
for x2, y2, w2, h2 in remp:
A = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
B = {'x1': x2, 'y1': y2, 'x2': x2+w2, 'y2': y2+h2, 'w': w2, 'h': h2}
# overlap between A and B
SA = A['w']*A['h']
SB = B['w']*B['h']
SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
SU = SA + SB - SI
overlap_AB = float(SI) / float(SU)
overlap_A = float(SI) / float(SA)
overlap_B = float(SI) / float(SB)
# print(overlap_AB)
#
if overlap_A >= 0.40 or overlap_B >= 0.40:
over.add((B['x1'],B['y1'],B['w'],B['h']))
# print(len(over))
if len(over) != 0: #Overlap
remp = remp - over
for i in over: ref.remove(i)
over.add((A['x1'],A['y1'],A['w'],A['h']))
# print(over)
final.add((min([i[0] for i in over]), min([i[1] for i in over]), max([i[0]+i[2] for i in over]) - min([i[0] for i in over]), max([i[1]+i[3] for i in over]) - min([i[1] for i in over])))
# final.add((np.mean([i[0] for i in over]), np.mean([i[1] for i in over]), np.mean([i[2] for i in over]), np.mean([i[3] for i in over])))
noover.append(False)
else: #No overlap
final.add((x1,y1,w1,h1))
noover.append(True)
if all(noover):
return
else:
draw_superbox(final)
return
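# contains_remove: keep only the merged candidates that do not completely
# contain another candidate, so enclosing outer boxes are discarded.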
def contains_remove():
for x, y, w, h in merged_candidates:
f = False
temp = set(merged_candidates)
temp.remove((x, y, w, h))
for x1, y1, w1, h1 in temp:
if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
f = False
break
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
else:
f = True
if f == True:
refined.add((x, y, w, h))
# def contains_remove():
# for x, y, w, h in merged_candidates:
# temp = set(merged_candidates)
# temp.remove((x, y, w, h))
# test = []
# for x1, y1, w1, h1 in temp:
# A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
# B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# # overlap between A and B
# SA = A['w']*A['h']
# SB = B['w']*B['h']
# SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
# SU = SA + SB - SI
# overlap_AB = float(SI) / float(SU)
# if overlap_AB > 0.0:
# # if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
# test.append(False)
# else:
# test.append(True)
# else:
# test.append(True)
# if all(test):
# refined.add((x, y, w, h))
def mean_rect(l):
return (min([i[0] for i in l]), min([i[1] for i in l]), max([i[0]+i[2] for i in l]) - min([i[0] for i in l]), max([i[1]+i[3] for i in l]) - min([i[1] for i in l]))
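# merge: cluster selective-search candidates whose position and size all agree
# to within ~14% of the mean image dimension, replace each cluster by its
# enclosing rectangle, then prune containers via contains_remove().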
def merge():
global width, height
thresh = int(((width+height)/2)*(0.14))
tempc = set()
for x, y, w, h in candidates:
if (x, y, w, h) in tempc: continue
temp = set()
temp.add((x, y, w, h))
for x1, y1, w1, h1 in candidates:
if abs(x1-x) <= thresh and abs(y1-y) <= thresh and abs(w1-w) <= thresh and abs(h1-h) <= thresh:
temp.add((x1, y1, w1, h1))
tempc.add((x1, y1, w1, h1))
merged_candidates.add(mean_rect(temp))
contains_remove()
for name in os.listdir("./Images"):
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
print("Processing Image " + name.split(".")[0])
fname = "./Images/" + name
print(fname)
img = skimage.io.imread(fname)
width = len(img[0])
height = len(img)
# new_size = 256
# height = int(new_size * height / width)
# width = new_size
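    # Heuristic resizing: depending on the original area and aspect ratio the
    # image is rescaled so that its width becomes 256 or 512 px (height scaled
    # proportionally); the thresholds below look empirically tuned for the
    # seal images and are left unchanged.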
if width*height < 256*256*(0.95) and abs(width-height) <= 3 :
new_size = 512
height = int(new_size * height / width)
width = new_size
print("A")
elif width*height < 220*220*(1.11):
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B")
elif width*height < 256*256:
new_size = 256
height = int(new_size * height / width)
width = new_size
print("B1")
elif width*height > 512*512*(0.99) and width < 800 and height < 800:
new_size = 512
height = int(new_size * height / width)
width = new_size
print("C")
elif width*height < 512*512*(0.95) and width*height > 256*256*(1.15):
new_size = 512
height = int(new_size * height / width)
width = new_size
print("D")
tried = []
while True:
tried.append(width)
candidates = set()
merged_candidates = set()
refined = set()
final = set()
final_extended = set()
text_boxes = set()
text=set()
text_cut = set()
no_text = set()
stage = 1
text_cut_final = set()
for sc in [350,450,500]:
for sig in [0.8]:
for mins in [30,60,120]: # important
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]):
pass
else:
img = skimage.transform.resize(img, (height, width))
img_lbl, regions = selectivesearch.selective_search(
img, scale=sc, sigma= sig,min_size = mins)
for r in regions:
# excluding same rectangle (with different segments)
if r['rect'] in candidates:
continue
# excluding regions smaller than 2000 pixels
if r['size'] < 2000:
continue
# distorted rects
x, y, w, h = r['rect']
if w / h > 1.2 or h / w > 1.2:
continue
if w >= (img.shape[0]-1)*(0.7) and h >= (img.shape[1]-1)*(0.7):
continue
candidates.add(r['rect'])
print("Stage " + str(stage) + " Complete.")
stage+=1
print(candidates)
merge()
print(refined)
draw_superbox()
print(final)
extend_superbox()
print(final_extended)
os.makedirs("Regions/"+name.split(".")[0])
# draw rectangles on the original image
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in final_extended:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Regions/"+name.split(".")[0]+"/FinalRegions.png")
plt.close('all')
img1 = skimage.io.imread(fname)[:,:,:3]
if height == len(img1) and width == len(img1[0]): pass
else: img1 = skimage.transform.resize(img1, (height, width))
# imgT = Image.open(fname).convert('L')
# w, h = imgT.size
# if height == h and width == w:
# pass
# else:
# # img1 = skimage.transform.resize(img1, (height, width))
# imgT = imgT.resize((width,height), Image.ANTIALIAS)
ij = 1
fList = []
box_list = []
for x, y, w, h in final_extended:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg", img1[y:y+h,x:x+w])
# imgT.crop((x,y,x+w,y+h)).save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
# imgT = Image.open("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.png").convert('L')
# imgT.save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
fList.append("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg")
box_list.append((x, y, w, h))
ij+=1
# classify text no text
text_boxes=set()
text = set()
no_text = set()
both_text = set()
text_cut_final = set()
i = 0
try:
a = getClass(fList)
l = np.array([0,1,2])
for pred in a:
idx = list((-pred).argsort())
pred = l[np.array(idx)]
if pred[0] == 1 or pred[0] == 2:
text_boxes.add(box_list[i])
elif pred[0] == 0:
no_text.add(box_list[i])
if pred[0] == 2:
both_text.add(box_list[i])
print(pred)
i+=1
except:
print("No Text Regions")
draw_textbox()
print(text)
texbox_cut()
print(text_cut)
texbox_ext()
print(text_cut_final)
# draw rectangles on the original image
img = skimage.io.imread(fname)[:,:,:3]
if height == len(img) and width == len(img[0]): pass
else: img = skimage.transform.resize(img, (height, width))
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img)
for x, y, w, h in text_cut_final:
rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.savefig("Result/final_"+name.split(".")[0]+".png")
plt.close('all')
ij = 1
for x, y, w, h in text_cut_final:
skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_text.png", img[y:y+h,x:x+w])
ij+=1
# min area check
minf = 0
for x, y, w, h in text_cut_final:
if w*h < width*height*0.20 and (w < width*0.20 or h < height*0.20):
minf = 1
if (len(text_cut_final) == 0 or minf == 1) and len(tried) < 3:
print(tried)
print("New size being tried.")
shutil.rmtree("Regions/"+name.split(".")[0]+"/")
img = skimage.io.imread(fname)
twidth = len(img[0])
theight = len(img)
new_size = list(set([256,512,twidth]) - set(tried))[0]
height = int(new_size * theight / twidth)
width = new_size
else:
break
| apache-2.0 |
CalvinNeo/PyGeo | countpca_segmentation_2.py | 1 | 7423 | #coding:utf8
import numpy as np, scipy
import pylab as pl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import time
from matplotlib import cm
from matplotlib import mlab
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from itertools import *
import collections
from multiprocessing import Pool
import random
from scipy.optimize import leastsq
from adasurf import AdaSurfConfig, adasurf, paint_surfs, identifysurf, point_normalize, Surface
ELAPSE_SEG = 0
class SurfSegConfig:
def __init__(self):
self.slice_count = 4
self.origin_points = 5
self.most_combination_points = 20
self.same_threshold = 0.1 # the smaller, the more accurate when judging two surfaces are identical, more surfaces can be generated
self.pointsame_threshold = 1.0
self.filter_rate = 0.08
self.filter_count = 50
self.ori_adarate = 2.0
self.step_adarate = 1.0
self.max_adarate = 2.0
self.split_by_count = True
self.weak_abort = 45
def paint_points(points, show = True, title = '', xlim = None, ylim = None, zlim = None):
fig = pl.figure()
ax = fig.add_subplot(111, projection='3d')
if xlim == None:
xlim = (np.min(points[:, 0]), np.max(points[:, 0]))
if ylim == None:
ylim = (np.min(points[:, 1]), np.max(points[:, 1]))
if zlim == None:
zlim = (np.min(points[:, 2]), np.max(points[:, 2]))
x1 = points[:, 0]
y1 = points[:, 1]
z1 = points[:, 2]
ax.scatter(x1, y1, z1, c='r')
ax.set_zlim(zlim[0], zlim[1])
ax.set_ylim(ylim[0], ylim[1])
ax.set_xlim(xlim[0], xlim[1])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
pl.title(title)
if show:
pl.show()
return fig
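# surf_segmentation: normalise the point cloud, slice it along the first
# coordinate into config.slice_count slabs (equal point counts or equal
# spatial extent), then pass each slab together with the points left
# unassigned so far to identifysurf(), accumulating the fitted surfaces;
# returns (surfaces, normalised points, (slice figures,)).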
def surf_segmentation(points, config, paint_when_end = False):
global ELAPSE_SEG
config.slice_count = min(int(len(points) / config.origin_points), config.slice_count)
assert len(points) / config.slice_count >= config.origin_points
adasurconfig = AdaSurfConfig({'origin_points': config.origin_points
, 'most_combination_points': config.most_combination_points
, 'same_threshold': config.same_threshold
, 'filter_rate': config.filter_rate
, 'ori_adarate': config.ori_adarate
, 'step_adarate': config.step_adarate
, 'max_adarate': config.max_adarate
, 'pointsame_threshold': config.pointsame_threshold
, 'filter_count' : config.filter_count
, 'weak_abort' : config.weak_abort
})
surfs = []
slice_fig = []
npoints = point_normalize(points)
starttime = time.clock()
xlim = (np.min(npoints[:, 0]), np.max(npoints[:, 0]))
ylim = (np.min(npoints[:, 1]), np.max(npoints[:, 1]))
zlim = (np.min(npoints[:, 2]), np.max(npoints[:, 2]))
pca_md = mlab.PCA(np.copy(npoints))
projection0_direction = None
# projection0_direction = pca_md.Y[0]
# projection0 = np.inner(projection0_direction, npoints)
projection0 = npoints[:, 0]
if config.split_by_count:
step_count = len(projection0) / config.slice_count
pointsets = [np.array([]).reshape(0,3)] * config.slice_count
sorted_projection0_index = np.argsort(projection0)
current_slot_count, ptsetid = 0, 0
for index in sorted_projection0_index:
pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[index, :]))
current_slot_count += 1
if current_slot_count > step_count:
current_slot_count = 0
ptsetid += 1
else:
projection0min, projection0max = np.min(projection0), np.max(projection0)
step_len = (projection0max - projection0min) / config.slice_count
pointsets = [np.array([]).reshape(0,3)] * config.slice_count
for i in xrange(len(projection0)):
if projection0[i] == projection0max:
ptsetid = config.slice_count - 1
else:
ptsetid = int((projection0[i] - projection0min) / step_len)
pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[i]))
# random.shuffle(pointsets)
partial_surfs, fail = [], np.array([]).reshape(0,3)
# for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
# print "slice", len(ptset), xlim, ylim, zlim
# paint_points(ptset, xlim = xlim, ylim = ylim, zlim = zlim)
for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
print "--------------------------------------"
print "before segment", ptsetindex, '/', len(pointsets)
print 'derived surfs:'
# print '---000', ptset.shape, np.array(fail).shape, np.array(fail), fail
        if fail is None:
allptfortest = np.array(ptset)
else:
allptfortest = np.vstack((ptset, np.array(fail).reshape(-1,3)))
print "len of surf is: ", len(partial_surfs), ", len of points is: ", len(allptfortest)
        if allptfortest is not None and len(allptfortest) > 0:
partial_surfs, _, fail, extradata = identifysurf(allptfortest, adasurconfig, donorm = False, surfs = partial_surfs, title = str(ptsetindex)
, paint_when_end = paint_when_end, current_direction = projection0_direction)
if paint_when_end:
slice_fig.append(extradata[0])
        if fail is None:
print "after segment", ptsetindex, "len of surf", len(partial_surfs), "fail is None", fail
else:
print "after segment", ptsetindex, "len of surf", len(partial_surfs), "len of fail", len(fail)
for x in partial_surfs:
x.printf()
surfs.extend(partial_surfs)
# fig = pl.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(npoints[:, 0], npoints[:, 1], npoints[:, 2], c='r')
# x = np.linspace(0, pca_md.Wt[0, 0] * 100, 300)
# y = np.linspace(0, pca_md.Wt[0, 1] * 100, 300)
# z = np.linspace(0, pca_md.Wt[0, 2] * 100, 300)
# ax.plot(x, y, z, c='k')
# x = np.linspace(0, pca_md.Wt[1, 0] * 100, 300)
# y = np.linspace(0, pca_md.Wt[1, 1] * 100, 300)
# z = np.linspace(0, pca_md.Wt[1, 2] * 100, 300)
# ax.plot(x, y, z, c='g')
# pl.show()
return surfs, npoints, (slice_fig, )
if __name__ == '__main__':
c = np.loadtxt('5.py', comments='#')
config = SurfSegConfig()
print 'config', config.__dict__
import time
starttime = time.clock()
surfs, npoints, extradata = surf_segmentation(c, config, paint_when_end = True)
print "----------BELOW ARE SURFACES---------- count:", len(surfs)
print 'TOTAL: ', time.clock() - starttime
print 'ELAPSE_SEG: ', ELAPSE_SEG
ALL_POINT = 0
for s,i in zip(surfs, range(len(surfs))):
print "SURFACE ", i
print s.args # surface args
print s.residuals # MSE
print len(s.points)
ALL_POINT += len(s.points)
# print s[2] # npoints
print '**************************************'
print 'ALL_POINT: ', ALL_POINT
print '----------BELOW ARE POINTS----------'
# for s,i in zip(surfs, range(len(surfs))):
# print "SURFACE ", i
# print s.points
paint_surfs(surfs, npoints, 'all')
print extradata
for slice_fig in extradata[0]:
slice_fig.show()
| apache-2.0 |
fmacias64/deap | examples/es/cma_plotting.py | 12 | 4326 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
import matplotlib.pyplot as plt
# Problem size
N = 10
NGEN = 125
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
def main(verbose=True):
# The cma module uses the numpy random number generator
numpy.random.seed(64)
# The CMA-ES algorithm takes a population of one individual as argument
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
# for more details about the rastrigin and other tests for CMA-ES
strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
halloffame = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
# Objects that will compile the data
sigma = numpy.ndarray((NGEN,1))
axis_ratio = numpy.ndarray((NGEN,1))
diagD = numpy.ndarray((NGEN,N))
fbest = numpy.ndarray((NGEN,1))
best = numpy.ndarray((NGEN,N))
std = numpy.ndarray((NGEN,N))
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Update the hall of fame and the statistics with the
# currently evaluated population
halloffame.update(population)
record = stats.compile(population)
logbook.record(evals=len(population), gen=gen, **record)
if verbose:
print(logbook.stream)
        # Save more data along the evolution for later plotting
        # diagD is sorted and square-rooted in the update method
sigma[gen] = strategy.sigma
axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
diagD[gen, :N] = strategy.diagD**2
fbest[gen] = halloffame[0].fitness.values
best[gen, :N] = halloffame[0]
std[gen, :N] = numpy.std(population, axis=0)
# The x-axis will be the number of evaluations
x = list(range(0, strategy.lambda_ * NGEN, strategy.lambda_))
avg, max_, min_ = logbook.select("avg", "max", "min")
plt.figure()
plt.subplot(2, 2, 1)
plt.semilogy(x, avg, "--b")
plt.semilogy(x, max_, "--b")
plt.semilogy(x, min_, "-b")
plt.semilogy(x, fbest, "-c")
plt.semilogy(x, sigma, "-g")
plt.semilogy(x, axis_ratio, "-r")
plt.grid(True)
plt.title("blue: f-values, green: sigma, red: axis ratio")
plt.subplot(2, 2, 2)
plt.plot(x, best)
plt.grid(True)
plt.title("Object Variables")
plt.subplot(2, 2, 3)
plt.semilogy(x, diagD)
plt.grid(True)
plt.title("Scaling (All Main Axes)")
plt.subplot(2, 2, 4)
plt.semilogy(x, std)
plt.grid(True)
plt.title("Standard Deviations in All Coordinates")
plt.show()
if __name__ == "__main__":
main(False)
| lgpl-3.0 |
jjx02230808/project0223 | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
manuelli/director | src/python/director/planplayback.py | 1 | 7857 | import os
import vtkAll as vtk
import math
import time
import re
import numpy as np
from director.timercallback import TimerCallback
from director import objectmodel as om
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import robotstate
import copy
import pickle
import scipy.interpolate
def asRobotPlan(msg):
'''
If the given message is a robot_plan_with_supports_t then this function returns
the plan message contained within it. For any other message type, this function
just returns its input argument.
'''
try:
import drc as lcmdrc
except ImportError:
pass
else:
if isinstance(msg, lcmdrc.robot_plan_with_supports_t):
return msg.plan
return msg
class PlanPlayback(object):
def __init__(self):
self.animationCallback = None
self.animationTimer = None
self.interpolationMethod = 'slinear'
self.playbackSpeed = 1.0
self.jointNameRegex = ''
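    # getPlanPoses accepts either one robot plan message (possibly wrapped in a
    # robot_plan_with_supports_t) or a list of them; for a list the per-plan
    # time vectors are shifted and concatenated so that a single monotonically
    # increasing array of pose times (seconds) and list of poses is returned.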
@staticmethod
def getPlanPoses(msgOrList):
if isinstance(msgOrList, list):
messages = msgOrList
allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0])
for msg in messages[1:]:
poseTimes, poses = PlanPlayback.getPlanPoses(msg)
poseTimes += allPoseTimes[-1]
allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:]))
allPoses += poses[1:]
return allPoseTimes, allPoses
else:
msg = asRobotPlan(msgOrList)
poses = []
poseTimes = []
for plan in msg.plan:
pose = robotstate.convertStateMessageToDrakePose(plan)
poseTimes.append(plan.utime / 1e6)
poses.append(pose)
return np.array(poseTimes), poses
@staticmethod
def getPlanElapsedTime(msg):
msg = asRobotPlan(msg)
startTime = msg.plan[0].utime
endTime = msg.plan[-1].utime
return (endTime - startTime) / 1e6
@staticmethod
def mergePlanMessages(plans):
msg = copy.deepcopy(plans[0])
for plan in plans[1:]:
plan = copy.deepcopy(plan)
lastTime = msg.plan[-1].utime
for state in plan.plan:
state.utime += lastTime
msg.plan_info += plan.plan_info
msg.plan += plan.plan
msg.num_states = len(msg.plan)
return msg
@staticmethod
def isPlanInfoFeasible(info):
return 0 <= info < 10
@staticmethod
def isPlanFeasible(plan):
plan = asRobotPlan(plan)
return plan is not None and (max(plan.plan_info) < 10 and min(plan.plan_info) >= 0)
def stopAnimation(self):
if self.animationTimer:
self.animationTimer.stop()
    def setInterpolationMethod(self, method):
self.interpolationMethod = method
def playPlan(self, msg, jointController):
self.playPlans([msg], jointController)
def playPlans(self, messages, jointController):
assert len(messages)
poseTimes, poses = self.getPlanPoses(messages)
self.playPoses(poseTimes, poses, jointController)
def getPoseInterpolatorFromPlan(self, message):
poseTimes, poses = self.getPlanPoses(message)
return self.getPoseInterpolator(poseTimes, poses)
def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True):
if unwrap_rpy:
poses = np.array(poses, copy=True)
poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(poseTimes, poses, axis=0)
return f
def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples):
poseTimes, poses = self.getPlanPoses(messages)
f = self.getPoseInterpolator(poseTimes, poses)
sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
meshes = []
for sampleTime in sampleTimes:
pose = f(sampleTime)
jointController.setPose('plan_playback', pose)
polyData = vtk.vtkPolyData()
robotModel.model.getModelMesh(polyData)
meshes.append(polyData)
return meshes
def showPoseAtTime(self, time, jointController, poseInterpolator):
pose = poseInterpolator(time)
jointController.setPose('plan_playback', pose)
def playPoses(self, poseTimes, poses, jointController):
f = self.getPoseInterpolator(poseTimes, poses)
timer = SimpleTimer()
def updateAnimation():
tNow = timer.elapsed() * self.playbackSpeed
if tNow > poseTimes[-1]:
pose = poses[-1]
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
return False
pose = f(tNow)
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
self.animationTimer = TimerCallback()
self.animationTimer.targetFps = 60
self.animationTimer.callback = updateAnimation
self.animationTimer.start()
updateAnimation()
def picklePlan(self, filename, msg):
poseTimes, poses = self.getPlanPoses(msg)
pickle.dump((poseTimes, poses), open(filename, 'w'))
def getMovingJointNames(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
return jointNames
def plotPlan(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
self.plotPoses(poseTimes, poses)
def plotPoses(self, poseTimes, poses):
import matplotlib.pyplot as plt
poses = np.array(poses)
if self.jointNameRegex:
jointIds = range(poses.shape[1])
else:
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
jointTrajectories = [poses[:,jointId] for jointId in jointIds]
seriesNames = []
sampleResolutionInSeconds = 0.01
numberOfSamples = (poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds
xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
fig = plt.figure()
ax = fig.add_subplot(111)
for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories):
if self.jointNameRegex and not re.match(self.jointNameRegex, jointName):
continue
x = poseTimes
y = jointTrajectory
y = np.rad2deg(y)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(x, y)
ax.plot(x, y, 'ko')
seriesNames.append(jointName + ' points')
ax.plot(xnew, f(xnew), '-')
seriesNames.append(jointName + ' ' + self.interpolationMethod)
ax.legend(seriesNames, loc='upper right').draggable()
ax.set_xlabel('time (s)')
ax.set_ylabel('joint angle (deg)')
ax.set_title('joint trajectories')
plt.show()
| bsd-3-clause |
mwindau/praktikum | v351/dreieck.py | 1 | 1188 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from scipy.optimize import curve_fit
oberwelle3, amplitude3 = np.genfromtxt('Rohdaten/dreieckspannung.txt', unpack=True)
plt.plot(oberwelle3, amplitude3,'k.',label="Messdaten")
#plt.legend(loc='best')
plt.grid()
#plt.xlim(0,1.5)
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'Oberwellen')
plt.ylabel(r'$\mathrm{U}/V$')
plt.tight_layout()
#plt.show()
#plt.savefig('build/50ohm.pdf')
####################
newX = np.logspace(-4, 1, base=10) # Makes a nice domain for the fitted curves.
# This avoids the sorting and the swarm of lines.
# Let's fit an exponential function.
# This looks like a line on a log-log plot.
def myExpFunc(x, a, b):
return a * np.power(x, b)
popt, pcov = curve_fit(myExpFunc, oberwelle3, amplitude3)
plt.plot(newX, myExpFunc(newX, *popt), 'r-',
label="Fit".format(*popt))
plt.xlim(10**-2, 10**1)
plt.ylim(10**-0.5, 10**1)
print('Exponential Fit: y = (a*(x**b))')
print('\ta = popt[0] = {0}\n\tb = popt[1] = {1}'.format(*popt))
####################
plt.legend(loc='best')
#plt.show()
plt.savefig('build/dreieckspannung.pdf')
| mit |
dhruv13J/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
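  (For c classes, One-vs-Rest fits c binary classifiers while One-vs-One
  fits c * (c - 1) / 2 of them; for the 3 iris classes both counts are 3.)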
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problem.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
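For example, embedding n_samples = 500 points with an admissible distortion
eps = 0.5 requires at least 4 * ln(500) / (0.5**2 / 2 - 0.5**3 / 3), i.e.
roughly 300 dimensions, regardless of the original number of features.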
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
imatge-upc/saliency-2016-cvpr | shallow/train.py | 2 | 3064 | # add to kfkd.py
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet,BatchIterator
import os
import numpy as np
from sklearn.utils import shuffle
import cPickle as pickle
import matplotlib.pyplot as plt
import Image
import ImageOps
from scipy import misc
import scipy.io
import theano
def load():
f = file('data_Salicon_T.cPickle', 'rb')
loaded_obj = pickle.load(f)
f.close()
X, y = loaded_obj
return X, y
def float32(k):
return np.cast['float32'](k)
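# AdjustVariable: nolearn on_epoch_finished hook that linearly anneals a shared
# Theano hyper-parameter (here the learning rate and momentum) from `start` to
# `stop` over nn.max_epochs epochs.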
class AdjustVariable(object):
def __init__(self, name, start=0.03, stop=0.001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
epoch = train_history[-1]['epoch']
new_value = float32(self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
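# FlipBatchIterator: augments training batches by mirroring half of the images
# horizontally; the flattened 48x48 saliency targets of those samples are
# reshaped, mirrored the same way and flattened back so inputs and targets
# stay aligned.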
class FlipBatchIterator(BatchIterator):
def transform(self, Xb, yb):
Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
# Flip half of the images in this batch at random:
bs = Xb.shape[0]
indices = np.random.choice(bs, bs / 2, replace=False)
Xb[indices] = Xb[indices, :, :, ::-1]
tmp = yb[indices].reshape(bs/2,1,48,48)
mirror = tmp[ :,:,:, ::-1]
yb[indices] = mirror.reshape(bs/2,48*48)
return Xb, yb
net2 = NeuralNet(
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('hidden4', layers.DenseLayer),
('maxout6',layers.FeaturePoolLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 3, 96, 96),
conv1_num_filters=32, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
conv2_num_filters=64, conv2_filter_size=(3, 3), pool2_pool_size=(2, 2),
conv3_num_filters=64, conv3_filter_size=(3, 3), pool3_pool_size=(2, 2),
hidden4_num_units=48*48*2,
maxout6_pool_size=2,output_num_units=48*48,output_nonlinearity=None,
update_learning_rate=theano.shared(float32(0.05)),
update_momentum=theano.shared(float32(0.9)),
regression=True,
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.05, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
],
batch_iterator_train=FlipBatchIterator(batch_size=128),
max_epochs=1200,
verbose=1,
)
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
X = X.astype(np.float32)
y = y.astype(np.float32)
net2.fit(X, y)
with open('JuntingNet_SALICON.pickle', 'wb') as f:
pickle.dump(net2, f, -1) | mit |
akosyakov/intellij-community | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 52 | 5401 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| apache-2.0 |
uberdugo/mlia | Ch05/EXTRAS/plot2D.py | 7 | 1276 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import logRegres
dataMat,labelMat=logRegres.loadDataSet()
dataArr = array(dataMat)
weights = logRegres.stocGradAscent0(dataArr,labelMat)
n = shape(dataArr)[0] #number of points to create
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
markers =[]
colors =[]
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
#weights = [-2.9, 0.72, 1.29]
#weights = [-5, 1.09, 1.42]
weights = [13.03822793, 1.32877317, -1.96702074]
weights = [4.12, 0.48, -0.6168]
y = (-weights[0]-weights[1]*x)/weights[2]
type3 = ax.plot(x, y)
#ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
#ax.axis([-5000,100000,-2,25])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show() | gpl-3.0 |
potash/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/api/scatter_piecharts.py | 6 | 1194 | """
This example makes custom 'pie charts' as the markers for a scatter plot.
Thanks to Manuel Metz for the example
"""
import math
import numpy as np
import matplotlib.pyplot as plt
# first define the ratios
r1 = 0.2 # 20%
r2 = r1 + 0.4 # 40%
# define some sizes of the scatter marker
sizes = [60,80,120]
# calculate the points of the first pie marker
#
# these are just the origin (0,0) +
# some points on a circle cos,sin
x = [0] + np.cos(np.linspace(0, 2*math.pi*r1, 10)).tolist()
y = [0] + np.sin(np.linspace(0, 2*math.pi*r1, 10)).tolist()
xy1 = list(zip(x,y))
# ...
x = [0] + np.cos(np.linspace(2*math.pi*r1, 2*math.pi*r2, 10)).tolist()
y = [0] + np.sin(np.linspace(2*math.pi*r1, 2*math.pi*r2, 10)).tolist()
xy2 = list(zip(x,y))
x = [0] + np.cos(np.linspace(2*math.pi*r2, 2*math.pi, 10)).tolist()
y = [0] + np.sin(np.linspace(2*math.pi*r2, 2*math.pi, 10)).tolist()
xy3 = list(zip(x,y))
fig, ax = plt.subplots()
ax.scatter( np.arange(3), np.arange(3), marker=(xy1,0), s=sizes, facecolor='blue' )
ax.scatter( np.arange(3), np.arange(3), marker=(xy2,0), s=sizes, facecolor='green' )
ax.scatter( np.arange(3), np.arange(3), marker=(xy3,0), s=sizes, facecolor='red' )
plt.show()
| mit |
mmottahedi/neuralnilm_prototype | scripts/e249.py | 2 | 3897 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=True
#lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
layers_config=[
{
'type': LSTMLayer,
'num_units': 10,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
]
)
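# exp_x builds one experiment: it lazily creates a single RealApplianceSource
# shared by all learning-rate variants, deep-copies the base network
# configuration, plugs in the experiment name, source and nesterov-momentum
# learning rate, and appends a sigmoid dense output layer sized to the number
# of target appliances.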
def exp_x(name, learning_rate):
global source
try:
a = source
except NameError:
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
updates=partial(nesterov_momentum, learning_rate=learning_rate)
))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid,
'W': Normal(std=(1/sqrt(50)))
}
)
net = Net(**net_dict_copy)
return net
def main():
for experiment, learning_rate in [('a', 1.0), ('b', 0.1), ('c', 0.01),
('d', 0.001), ('e', 0.0001)]:
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
print("***********************************")
print("Preparing", full_exp_name, "...")
try:
net = exp_x(full_exp_name, learning_rate)
run_experiment(net, path, epochs=1000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
| mit |
brodeau/aerobulk | python/plot_tests/plot_station_asf.py | 1 | 9926 | #!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Post-diagnostic of STATION_ASF / L. Brodeau, 2019
import sys
from os import path as path
#from string import replace
import math
import numpy as nmp
from netCDF4 import Dataset,num2date
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
reload(sys)
sys.setdefaultencoding('utf8')
cy1 = '2016' ; # First year
cy2 = '2018' ; # Last year
jt0 = 0
jt0 = 17519
dir_figs='.'
size_fig=(13,7)
fig_ext='png'
clr_red = '#AD0000'
clr_blu = '#3749A3'
clr_gre = '#548F64'
clr_sat = '#ffed00'
clr_mod = '#008ab8'
rDPI=200.
L_ALGOS = [ 'COARE3p6' , 'ECMWF' , 'NCAR' ]
l_xtrns = [ '-noskin' , '-noskin' , '' ] ; # string to add to algo name (L_ALGOS) to get version without skin params turned on
l_color = [ '#ffed00' , '#008ab8' , '0.4' ] ; # colors to differentiate algos on the plot
l_width = [ 3 , 2 , 1 ] ; # line-width to differentiate algos on the plot
l_style = [ '-' , '-' , '--' ] ; # line-style
L_VNEM = [ 'qla' , 'qsb' , 'qt' , 'qlw' , 'taum' , 'dt_skin' ]
L_VARO = [ 'Qlat' , 'Qsen' , 'Qnet' , 'Qlw' , 'Tau' , 'dT_skin' ] ; # name of variable on figure
L_VARL = [ r'$Q_{lat}$', r'$Q_{sens}$' , r'$Q_{net}$' , r'$Q_{lw}$' , r'$|\tau|$' , r'$\Delta T_{skin}$' ] ; # name of variable in latex mode
L_VUNT = [ r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$N/m^2$' , 'K' ]
L_VMAX = [ 75. , 75. , 800. , 25. , 1.2 , -0.7 ]
L_VMIN = [ -250. , -125. , -400. , -150. , 0. , 0.7 ]
L_ANOM = [ True , True , True , True , True , False ]
#L_VNEM = [ 'qlw' ]
#L_VARO = [ 'Qlw' ] ; # name of variable on figure
#L_VARL = [ r'$Q_{lw}$' ] ; # name of variable in latex mode
#L_VUNT = [ r'$W/m^2$' ]
#L_VMAX = [ 25. ]
#L_VMIN = [ -150. ]
#L_ANOM = [ True ]
nb_algos = len(L_ALGOS) ; print(nb_algos)
# Getting arguments:
narg = len(sys.argv)
if narg != 2:
print 'Usage: '+sys.argv[0]+' <DIR_OUT_SASF>'; sys.exit(0)
cdir_data = sys.argv[1]
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Populating and checking existence of files to be read
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def chck4f(cf):
cmesg = 'ERROR: File '+cf+' does not exist !!!'
if not path.exists(cf): print cmesg ; sys.exit(0)
###cf_in = nmp.empty((), dtype="S10")
cf_in = [] ; cf_in_ns = []
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in.append(cfi)
# Same but without skin params:
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+l_xtrns[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in_ns.append(cfi)
print('Files we are going to use:')
for ja in range(nb_algos): print(cf_in[ja])
print(' --- same without cool-skin/warm-layer:')
for ja in range(nb_algos): print(cf_in_ns[ja])
#-----------------------------------------------------------------
# Getting time array from the first file:
id_in = Dataset(cf_in[0])
vt = id_in.variables['time_counter'][jt0:]
cunit_t = id_in.variables['time_counter'].units
clndr_t = id_in.variables['time_counter'].calendar
id_in.close()
Nt = len(vt)
print(' "time" => units = '+cunit_t+', calendar = "'+clndr_t+'"')
vtime = num2date(vt, units=cunit_t) ; # something understandable!
ii=Nt/300
ib=max(ii-ii%10,1)
xticks_d=int(30*ib)
font_inf = { 'fontname':'Open Sans', 'fontweight':'normal', 'fontsize':14 }
nb_var = len(L_VNEM)
xF = nmp.zeros((Nt,nb_algos))
xFa = nmp.zeros((Nt,nb_algos))
for ctest in ['skin','noskin']:
for jv in range(nb_var):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos):
#
if ctest == 'skin': id_in = Dataset(cf_in[ja])
if ctest == 'noskin': id_in = Dataset(cf_in_ns[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xF[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(L_VMIN[jv], L_VMAX[jv]) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
if L_ANOM[jv]:
for ja in range(nb_algos): xFa[:,ja] = xF[:,ja] - nmp.mean(xF,axis=1)
if nmp.sum(xFa[:,:]) == 0.0:
                print(' Well! Seems that for variable '+L_VARO[jv]+', choice of algo has no impact at all!')
print(' ==> skipping anomaly plot...')
else:
                # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
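                # Illustrative example of this rounding heuristic (made-up value, not from the data):
                # rmax = 0.37 -> romagn = floor(log10(0.37)) = -1, rmlt = 10**(-1)/2 = 0.05,
                # yrng = copysign(ceil(0.37/0.05)*0.05, 0.37) = 0.4, i.e. a symmetric +/- 0.4 axis.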
#print 'yrng = ', yrng ; #sys.exit(0)
fig = plt.figure(num = 10+jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate('Anomaly of '+cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'_anomaly.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(10+jv)
# Difference skin vs noskin:
xFns = nmp.zeros((Nt,nb_algos))
for jv in range(nb_var-1):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos-1):
id_in = Dataset(cf_in[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
#
id_in = Dataset(cf_in_ns[ja])
xFns[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
xFa[:,ja] = xF[:,ja] - xFns[:,ja] ; # difference!
        # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
print 'yrng = ', yrng ; #sys.exit(0)
for ja in range(nb_algos-1):
calgo = L_ALGOS[ja]
if nmp.sum(xFa[:,ja]) == 0.0:
print(' Well! Seems that for variable '+L_VARO[jv]+', and algo '+calgo+', skin param has no impact')
print(' ==> skipping difference plot...')
else:
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=None, zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
#plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig('diff_skin-noskin_'+L_VARO[jv]+'_'+calgo+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
| gpl-3.0 |
CharlesShang/TFFRCNN | lib/roi_data_layer/minibatch.py | 5 | 8725 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
import os
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] \
if 'gt_ishard' in roidb[0] else np.zeros(gt_inds.size, dtype=int)
# blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]
blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] \
if 'dontcare_areas' in roidb[0] else np.zeros([0, 4], dtype=float)
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['im_name'] = os.path.basename(roidb[0]['image'])
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
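# Illustrative sketch of the sampling above (the config values are assumptions, not read from
# this repo): with cfg.TRAIN.BATCH_SIZE = 128, two images and FG_FRACTION = 0.25, each image
# contributes rois_per_image = 64 RoIs of which at most fg_rois_per_image = 16 are foreground
# (overlap >= FG_THRESH); the remaining slots are filled with background RoIs whose overlap
# lies in [BG_THRESH_LO, BG_THRESH_HI), and the background labels are then clamped to 0.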
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
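# Small worked example of the expansion above (hypothetical numbers): with num_classes = 3 and
# bbox_target_data = [[2, 0.1, 0.2, 0.3, 0.4]], the class index 2 selects columns 8:12
# (start = 4 * cls), so bbox_targets = [[0,0,0,0, 0,0,0,0, 0.1,0.2,0.3,0.4]] and the same four
# columns of bbox_inside_weights are set to cfg.TRAIN.BBOX_INSIDE_WEIGHTS.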
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/lib/twodim_base.py | 2 | 27339 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
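# For example, _min_int(0, 200) returns int16 (200 does not fit int8's [-128, 127] range),
# while _min_int(-5, 100) returns int8; tri() below relies on this to keep its index arrays small.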
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
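    # Writing through .flat with stride M+1 fills the k-th diagonal: the start offset is k for
    # k >= 0 or (-k)*M for k < 0, and the m[:M-k] row slice stops the strided write from
    # wrapping around past the last column when N is large.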
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
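        # multiply.accumulate along axis 1 turns the repeated x column into successive powers
        # (x, x**2, x**3, ...), so no explicit power loop is needed.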
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
kaichogami/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
skoslowski/gnuradio | gr-filter/examples/fir_filter_fff.py | 3 | 3371 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
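        # firdes.low_pass_2(gain, sample_rate, cutoff, transition_width, stop-band attenuation in dB):
        # narrowing the transition band or raising the attenuation increases the tap count
        # reported below, and hence the per-sample filtering cost.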
print("Num. Taps: ", len(taps))
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=10000,
help="Number of samples to process [default=%(default)r]")
parser.add_argument("-s", "--samplerate", type=eng_float, default=8000,
help="System sample rate [default=%(default)r]")
parser.add_argument("-B", "--bandwidth", type=eng_float, default=1000,
help="Filter bandwidth [default=%(default)r]")
parser.add_argument("-T", "--transition", type=eng_float, default=100,
help="Transition band [default=%(default)r]")
parser.add_argument("-A", "--attenuation", type=eng_float, default=80,
help="Stopband attenuation [default=%(default)r]")
parser.add_argument("-D", "--decimation", type=int, default=1,
help="Decmation factor [default=%(default)r]")
args = parser.parse_args()
put = example_fir_filter_fff(args.nsamples,
args.samplerate,
args.bandwidth,
args.transition,
args.attenuation,
args.decimation)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_snk = numpy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
f2 = pyplot.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
glennlive/gnuradio-wg-grc | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
ml-lab/pylearn2 | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from nose.plugins.skip import SkipTest
import os
from theano.sandbox import cuda
scipy_works = True
try:
import scipy
except ImportError:
# pyflakes gets mad if you set scipy to None here
scipy_works = False
sklearn_works = True
try:
import sklearn
except ImportError:
sklearn_works = False
h5py_works = True
try:
import h5py
except ImportError:
h5py_works = False
matplotlib_works = True
try:
from matplotlib import pyplot
except ImportError:
matplotlib_works = False
def skip_if_no_data():
if 'PYLEARN2_DATA_PATH' not in os.environ:
raise SkipTest()
def skip_if_no_scipy():
if not scipy_works:
raise SkipTest()
def skip_if_no_sklearn():
if not sklearn_works:
raise SkipTest()
def skip_if_no_gpu():
if cuda.cuda_available == False:
raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
if not h5py_works:
raise SkipTest()
def skip_if_no_matplotlib():
if not matplotlib_works:
raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
charlesll/RamPy | legacy_code/IR_dec_comb.py | 1 | 6585 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014
@author: charleslelosq
Carnegie Institution for Science
"""
import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")
import os
import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate
# to fit spectra we use the lmfit software of Matt Newville, CARS, university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report
from spectratools import * #Charles' libraries and functions
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
#### We define a set of functions that will be used for fitting data
#### unfortunately, as we use lmfit (which is convenient because it can easily fix or release
#### the parameters) we are not able to use arrays for parameters...
#### so it is a little bit long to write everything out, but in this way it is also quite robust...
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the parameter definition before
#### doing the data fit
def residual(pars, x, data=None, eps=None):
# unpack parameters:
# extract .value attribute for each parameter
a1 = pars['a1'].value
a2 = pars['a2'].value
f1 = pars['f1'].value
f2 = pars['f2'].value
l1 = pars['l1'].value
l2 = pars['l2'].value
# Gaussian model
peak1 = gaussian(x,a1,f1,l1)
peak2 = gaussian(x,a2,f2,l2)
model = peak1 + peak2
if data is None:
return model, peak1, peak2
if eps is None:
return (model - data)
return (model - data)/eps
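# Sketch of how this residual is used further down (note: 'gaussian' is Charles' spectratools
# helper, assumed here to take (x, amplitude, centre, width)):
#   result = minimize(residual, params, args=(xfit, data)) # least-squares fit of the two peaks
#   yout, peak1, peak2 = residual(params, xfit)            # with data=None, returns the fitted curves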
##### CORE OF THE CALCULATION BELOW
#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
"Open file",
"Please open the list of spectra")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
with open(filename) as inputfile:
results = list(csv.reader(inputfile)) # we read the data list
#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
name = str(results[lg]).strip('[]')
name = name[1:-1] # to remove unwanted ""
sample = np.genfromtxt(name) # get the sample to deconvolute
# we set here the lower and higher bonds for the interest region
    lb = 4700 ### MAY NEED TO ADJUST THAT
hb = 6000
interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
ese0 = interestspectra[:,2]/abs(interestspectra[:,1]) #take ese as a percentage, we assume that the treatment was made correctly for error determination... if not, please put sigma = None
interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100 # normalise spectra to maximum, easier to handle after
sigma = abs(ese0*interestspectra[:,1]) #calculate good ese
#sigma = None # you can activate that if you are not sure about the errors
xfit = interestspectra[:,0] # region to be fitted
data = interestspectra[:,1] # region to be fitted
params = Parameters()
####################### FOR MELT:
####################### COMMENT IF NOT WANTED
# (Name, Value, Vary, Min, Max, Expr)
params.add_many(('a1', 1, True, 0, None, None),
('f1', 5200, True, 750, None, None),
('l1', 1, True, 0, None, None),
('a2', 1, True, 0, None, None),
('f2', 5400, True, None, None, None),
('l2', 1, True, None, None, None))
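    # To freeze a parameter during the fit, set its Vary flag to False in the tuples above,
    # e.g. ('f1', 5200, False, None, None, None) pins the first peak position at 5200 cm-1.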
    result = minimize(residual, params, args=(xfit, data)) # fit data with leastsq model from scipy
model = fit_report(params) # the report
    yout, peak1, peak2 = residual(params, xfit) # the different peaks
#### We just calculate the different areas up to 4700 cmm-1 and those of the gaussians
# Select interest areas for calculating the areas of OH and H2Omol peaks
intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
esearea4500 = 1/sqrt(area4500) # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
# now for the gaussians
# unpack parameters:
# extract .value attribute for each parameter
    a1 = params['a1'].value
    a2 = params['a2'].value
    l1 = params['l1'].value
    l2 = params['l2'].value
AireG1 = gaussianarea(a1,l1)
AireG2 = gaussianarea(a2,l2)
##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
fig = figure()
plot(sample[:,0],sample[:,1],'k-')
plot(xfit,yout,'r-')
plot(xfit,peak1,'b-')
plot(xfit,peak2,'b-')
xlim(lb,hb)
ylim(0,np.max(sample[:,1]))
xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")
text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
    text(4650,a1 + 0.05*a1,('Area pic 1: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)
##### output of data, fitted peaks, parameters, and the figure in pdf
##### all goes into the ./deconv/ folder
name.rfind('/')
nameout = name[name.rfind('/')+1::]
namesample = nameout[0:nameout.find('.')]
pathint = str('/deconv/') # the output folder
ext1 = '_ydec.txt'
ext2 = '_params.txt'
ext3 = '.pdf'
pathout1 = pathbeg+pathint+namesample+ext1
pathout2 = pathbeg+pathint+namesample+ext2
pathout3 = pathbeg+pathint+namesample+ext3
matout = np.vstack((xfit,data,yout,peak1,peak2))
matout = np.transpose(matout)
np.savetxt(pathout1,matout) # saving the arrays of spectra
    fd = os.open( pathout2, os.O_RDWR|os.O_CREAT ) # Open a file and create it if it does not exist
fo = os.fdopen(fd, "w+") # Now get a file object for the above file.
fo.write(model) # write the parameters in it
fo.close()
savefig(pathout3) # save the figure
| gpl-2.0 |
vybstat/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
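# Background note (added; assumed derivation): each one-vs-all classifier c
# scores a point as coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c]; its
# hyperplane is where that score equals zero, so solving for x1 gives the
# line() expression above. The dashed lines plotted below are therefore the
# zero-score boundaries of the three classifiers in the standardized plane.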
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/multi_image.py | 12 | 2201 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, axes, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
class ImageFollower:
'update image in response to changes in clim or cmap on another image'
def __init__(self, follower):
self.follower = follower
def __call__(self, leader):
self.follower.set_cmap(leader.get_cmap())
self.follower.set_clim(leader.get_clim())
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this interactively and
# modify the colormap:
axes(ax[0]) # Return the current axes to the first one,
sci(images[0]) # because the current image must be in current axes.
show()
| apache-2.0 |
yask123/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
github4ry/pathomx | pathomx/kernel_helpers.py | 2 | 3634 | import os
import sys
import numpy as np
import pandas as pd
import re
import io
from matplotlib.figure import Figure, AxesStack
from matplotlib.axes import Subplot
from mplstyler import StylesManager
import warnings
from . import displayobjects
from .utils import scriptdir, basedir
from IPython.core import display
from copy import deepcopy
MAGIC_TYPES = [
# Numpy
np.array, np.ndarray,
# Pandas
pd.Series, pd.DataFrame,
Figure, Subplot,
StylesManager,
# View types
displayobjects.Svg, displayobjects.Html, displayobjects.Markdown,
display.SVG
]
class PathomxTool(object):
    ''' Simple wrapper class that holds the output data for a given tool; this is
    for user-friendliness, not for direct use '''
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __init__(self, name, *args, **kwargs):
self.__dict__.update(kwargs)
self._name = name
def pathomx_notebook_start(vars):
#for k, v in varsi.items():
# vars[k] = v
# _keep_input_vars = ['styles']
# vars['_pathomx_exclude_input_vars'] = [x for x in varsi.keys() if x not in _keep_input_vars]
# Handle IO magic
if '_io' in vars:
for k, v in vars['_io']['input'].items():
if v in vars:
vars[k] = deepcopy(vars[v])
else:
vars[k] = None
if '_rcParams' in vars:
global rcParams
from matplotlib import rcParams
# Block warnings from deprecated rcParams here
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for k, v in vars['_rcParams'].items():
rcParams[k] = v
# Legacy shim
if '_styles' in vars:
vars['styles'] = vars['_styles']
def pathomx_notebook_stop(vars):
varso = {}
if '_io' in vars:
# Handle IO magic
for k, v in vars['_io']['output'].items():
if k in vars:
vars[v] = vars[k]
else:
vars[v] = None
for k, v in vars.items():
# Check it's an accepted type for passing; and not private (starts with _)
if not k.startswith('_') and \
not k in vars['_io']['input'].keys():
if type(v) in MAGIC_TYPES or k in vars['_pathomx_expected_output_vars']:
varso[k] = v
elif hasattr(v, '_repr_html_'):
try:
# Check if it is a bound method (not a class definition)
v._repr_html_()
except:
pass
else:
varso[k] = displayobjects.Html(v)
vars['varso'] = varso
def progress(progress):
    ''' Output the current progress to stdout on the remote core;
    this will be read from stdout and displayed in the UI '''
print("____pathomx_execute_progress_%.2f____" % progress)
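# Illustrative sketch (an assumption, not part of the original module): the UI
# process could recover the value by pattern-matching the captured stdout, e.g.
#
#   import re
#   m = re.search(r"____pathomx_execute_progress_(\d+\.\d+)____", line)
#   if m:
#       current_progress = float(m.group(1))  # value printed with %.2f above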
class open_with_progress(io.IOBase):
def __init__(self, f, *args, **kwargs):
super(open_with_progress, self).__init__(f, *args, **kwargs)
self._fsize = os.path.getsize(f)
self._progress = None
def read(self, *args, **kwargs):
        data = super(open_with_progress, self).read(*args, **kwargs)
        self.check_and_emit_progress()
        return data
def check_and_emit_progress(self):
# We only output at 2dp so only emit when that changes
prg = round(self.tell() / self._fsize, 2)
if prg != self._progress:
self._progress = prg
progress(prg)
| gpl-3.0 |
GermanRuizMarcos/Classical-Composer-Classification | code_10_1/classification.py | 1 | 30838 | '''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from scipy import stats
import collections
import cv2
import matplotlib
import matplotlib.pyplot as plt
#### gabor filters
def build_filters():
filters = []
ksize = 31
for theta in np.arange(0, np.pi, np.pi / 16):
kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def process(img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
np.maximum(accum, fimg, accum)
return accum
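# Descriptive note (added): build_filters() creates a bank of 16 Gabor kernels
# with orientations spaced every pi/16 over [0, pi), and process() keeps the
# per-pixel maximum response across the bank. A minimal standalone sketch on a
# single spectrogram image (the path below is hypothetical):
#
#   img = cv2.imread('pictures/bach/example.png')
#   response = process(img, build_filters())
#   print np.mean(response), np.var(response)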
###
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files are transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
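# Framing note (illustrative arithmetic): a 1024-sample frame with 50% overlap
# corresponds to the hopSize of 512 passed to FrameGenerator below; assuming
# essentia's default 44.1 kHz sample rate, that is roughly 23 ms frames
# advancing by about 12 ms.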
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/bach/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
silence = SilenceRate(thresholds = [0.01])
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE filename STRING\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE silences REAL\n')
f.write('@ATTRIBUTE gaborfilter-mean REAL\n')
f.write('@ATTRIBUTE gaborfilter-variance REAL\n')
f.write('@ATTRIBUTE composer {bach, beethoven, chopin, haydn, liszt, mendelssohn, mozart, vivaldi}\n')
f.write('\n')
f.write('@DATA\n')
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/bach'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'bach'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('bach')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/beethoven/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/beethoven'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'beethoven'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('beethoven')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/chopin/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/chopin'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'chopin'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('chopin')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 4
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/haydn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/haydn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'haydn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('haydn')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
'''
# 5
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/liszt/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/liszt'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'liszt'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('liszt')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
	counter += 1
'''
# 6
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mendelssohn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mendelssohn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mendelssohn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mendelssohn')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 7
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mozart/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mozart'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mozart'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mozart')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 8
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/vivaldi/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/vivaldi'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
	# Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'vivaldi'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('vivaldi')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
| gpl-3.0 |
waynenilsen/statsmodels | examples/python/robust_models_0.py | 33 | 2992 |
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
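# (Added note) The outliers were all shifted downward at large x1, so the free
# quadratic coefficient lets OLS bend toward them, while RLM's Huber weighting
# down-weights those points and stays close to the true line.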
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
| bsd-3-clause |
rvbelefonte/Rockfish2 | rockfish2/extensions/cps/model.py | 1 | 3390 | """
Tools for working with Computer Programs in Seismology velocity models
"""
import os
import numpy as np
import datetime
import pandas as pd
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from rockfish2 import logging
from rockfish2.models.profile import Profile
class CPSModel1d(Profile):
def __init__(self, *args, **kwargs):
self.NAME = kwargs.pop('name', '1D model')
self.UNITS = kwargs.pop('units', 'KGS')
self.ISOTROPY = kwargs.pop('isotropy', 'ISOTROPIC')
self.SHAPE = kwargs.pop('shape', 'FLAT EARTH')
self.DIM = kwargs.pop('dim', '1-D')
Profile.__init__(self, *args, **kwargs)
def __str__(self):
return self.write()
def write(self, path_or_buf=None, float_format='%10.6f', **kwargs):
"""
Write profile to the Computer Programs in Seismology model format
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
"""
model = self.model.copy()
col = ['hr'] + [k for k in model if k != 'hr']
model['hr'] = np.concatenate((np.diff(np.asarray(model.index)), [0.0]))
model.index = np.arange(len(model))
#model = model[0:len(model) - 1]
sng = "MODEL\n"
sng += "{:}\n".format(self.NAME)
sng += "{:}\n".format(self.ISOTROPY)
sng += "{:}\n".format(self.UNITS)
sng += "{:}\n".format(self.SHAPE)
sng += "{:}\n".format(self.DIM)
sng += "CONSTANT VELOCITY\n"
sng += "#\n"
sng += "Created by: {:}{:}\n"\
.format(self.__module__, self.__class__.__name__)
sng += "Created on: {:}\n".format(datetime.datetime.now())
sng += "#\n"
sng += model[col].to_csv(sep='\t', index=False,
float_format=float_format, **kwargs)
if path_or_buf is None:
return sng
if hasattr(path_or_buf, 'write'):
path_or_buf.write(sng)
else:
f = open(path_or_buf, 'w')
f.write(sng)
def read(self, filename, sep='\t'):
"""
        Read a profile from the Computer Programs in Seismology model format
"""
f = open(filename, 'rb')
kind = f.readline().replace('\n', '')
assert kind.startswith('MODEL'),\
'File does not appear to be CPS format'
self.NAME = f.readline().replace('\n', '')
self.ISOTROPY = f.readline().replace('\n', '')
self.UNITS = f.readline().replace('\n', '')
self.SHAPE = f.readline().replace('\n', '')
self.DIM = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
cols = f.readline().replace('\n', '').split()
self.model = pd.read_csv(filename, sep=sep, skiprows=11,
index_col=0)
try:
dz = self.model.index[:]
z = np.cumsum(np.asarray(dz)) - dz[0]
if z[-1] == 0:
z[-1] = dz[-2]
self.model.index = z
self.model.index.name = 'depth'
except:
pass
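# Minimal usage sketch (illustrative; the file names are hypothetical and the
# constructor arguments depend on the parent Profile class):
#
#   model = CPSModel1d(name='crustal model')
#   model.read('model96.dat')        # parse an existing CPS model file
#   print(model.write())             # dump it back as a CPS-formatted string
#   model.write('model96_out.dat')   # or write it straight to disk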
| gpl-2.0 |
CVML/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
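# Illustrative sanity check (added; not used by the benchmark below): with an
# orthonormal design the irrelevant columns have ~zero projection onto the
# relevant ones, so the mutual incoherence is ~0, while correlated blocks
# push it up.
#
#   rng_check = np.random.RandomState(0)
#   Q = np.linalg.qr(rng_check.randn(20, 6))[0]
#   mutual_incoherence(Q[:, :3], Q[:, 3:])   # ~0 (numerically ~1e-16)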
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
r-rathi/error-control-coding | perf/plot-pegd.py | 1 | 1496 | import numpy as np
import matplotlib.pyplot as plt
from errsim import *
def label(d, pe, pb, n):
if pb is None:
pb = pe
label = 'd={} pe={} n={} BSC'.format(d, pe, n)
else:
label = 'd={} pe={} n={} pb={}'.format(d, pe, n, pb)
return label
def plot(pe, fpath=None):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
r = np.arange(8, 65)
pWL = jointpmf5(pe, pe, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'g--', lw=2, label=label(3, pe, None, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'g-', lw=2, label=label(6, pe, None, 128))
pWL = jointpmf5(pe, .1, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'b--', lw=2, label=label(3, pe, .1, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'b-', lw=2, label=label(6, pe, .1, 128))
pWL = jointpmf5(pe, .5, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'r--', lw=2, label=label(3, pe, .5, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'r-', lw=2, label=label(6, pe, .5, 128))
ax.set_yscale('log')
ax.set_xticks(r[::8])
ax.set_xlim(r[0], r[-1])
#ax.set_ylim(1e-30, 1e-1)
ax.set_xlabel('Burst error correction capability, $r$')
ax.set_ylabel('$P_{egd}$')
    ax.set_title('Probability of Exceeding Guaranteed Error Detection Capability')
ax.legend(loc='lower right')
ax.grid(True)
#plt.tight_layout()
if fpath:
fig.savefig(fpath)
plt.show()
plt.close('all')
plot(1e-15, 'plots/pegd-pe=1e15.png')
plot(1e-6, 'plots/pegd-pe=1e6.png')
| mit |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py | 69 | 29920 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor.
Interface::
dvi = Dvi(filename, 72)
for page in dvi: # iterate over pages
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
import numpy as np
import struct
import subprocess
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
def __iter__(self):
"""
Iterate through the pages of the file.
        Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h = _mul2012(font._scale, font._tfm.height[g])
e = _mul2012(font._scale, font._tfm.depth[g])
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=(maxy-maxy_pure)*d)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1))
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument "nbytes" long.
Signedness is determined by the "signed" keyword.
"""
str = self.file.read(nbytes)
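        # Worked example (consistent with the big-endian accumulation below):
        # _arg(2) on the bytes 0x01 0x02 returns 0x100*0x01 + 0x02 = 258; with
        # signed=True a single byte 0xff gives 0xff - 0x100 = -1.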
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode "byte", read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
        elif 239 <= byte <= 242:
            nbytes = self._arg(byte-238)
            special = self.file.read(nbytes)
            self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError, "unknown command: byte %d"%byte
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of dvi file"
if i != 2:
raise ValueError, "Unknown dvi format %d"%i
if num != 25400000 or den != 7227 * 2**16:
raise ValueError, "nonstandard units in dvi file"
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError, "nonstandard magnification in dvi file"
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_char in dvi file"
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_rule in dvi file"
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_char in dvi file"
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_rule in dvi file"
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError, \
"misplaced bop in dvi file (state %d)" % self.state
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
        self.text = []          # list of (x, y, font, glyphnum, width)
        self.boxes = []         # list of (x, y, height, width)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced eop in dvi file"
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced push in dvi file"
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced pop in dvi file"
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced right in dvi file"
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced w in dvi file"
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced x in dvi file"
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced down in dvi file"
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced y in dvi file"
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced z in dvi file"
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced fnt_num in dvi file"
self.f = k
def _xxx(self, special):
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:])
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError, 'tfm checksum mismatch: %s'%n
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:])
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError, "misplaced post in dvi file"
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are _not_ used for comparison.
The size is in Adobe points (converted from TeX points).
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
            # +1 so that the highest character code is included
            nchars = max(tfm.width.iterkeys()) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in range(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError, "Packet length mismatch in vf file"
else:
if byte in (139, 140) or byte >= 243:
raise ValueError, "Inappropriate opcode %d in vf file" % byte
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError, "unknown vf opcode %d" % byte
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError, "Misplaced packet in vf file"
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of vf file"
if i != 202:
raise ValueError, "Unknown vf format %d" % i
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
    Convert a 32-bit two's complement value to a signed integer.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
Attributes:
checksum: for verifying against dvi file
      design_size: design size of the font, stored as a fix_word
        (the size in TeX points scaled by 2**20)
width[i]: width of character \#i, needs to be scaled
by the factor specified in the dvi file
(this is a dict because indexing may not start from 0)
height[i], depth[i]: height and depth of character \#i
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
file = open(filename, 'rb')
try:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack('!6H', header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack('!2I', header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
finally:
file.close()
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack('!%dI' % (len(x)/4), x)
for x in (widths, heights, depths) ]
        for i in range(ec-bc+1):   # character codes bc..ec inclusive
self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])])
self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4])
self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10']
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts, while
the pdf-related files perhaps only avoid the "Base 14" pdf fonts.
But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
file = open(filename, 'rt')
try:
self._parse(file)
finally:
file.close()
def __getitem__(self, texname):
result = self._font[texname]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
texname, psname = words[:2]
effects, encoding, filename = [], None, None
for word in words[2:]:
if not word.startswith('<'):
effects.append(word)
else:
word = word.lstrip('<')
if word.startswith('['):
assert encoding is None
encoding = word[1:]
elif word.endswith('.enc'):
assert encoding is None
encoding = word
else:
assert filename is None
filename = word
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
file = open(filename, 'rt')
try:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
            matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
finally:
file.close()
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError, "Broken name in encoding file: " + w
return result
def find_tex_file(filename, format=None):
"""
Call kpsewhich to find a file in the texmf tree.
If format is not None, it is used as the value for the --format option.
See the kpathsea documentation for more information.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result
def _read_nointr(pipe, bufsize=-1):
while True:
try:
return pipe.read(bufsize)
        except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
# With multiple text objects per figure (e.g. tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print '=== new page ==='
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print 'font', f.texname, 'scaled', f._scale/pow(2.0,20)
fPrev = f
print x,y,c, 32 <= c < 128 and chr(c) or '.', w
for x,y,w,h in page.boxes:
print x,y,'BOX',w,h
| agpl-3.0 |